repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
philanthropy-u/edx-platform | cms/djangoapps/contentstore/views/tests/test_unit_page.py | 222 | 2775 | """
Unit tests for the unit page.
"""
from contentstore.views.tests.utils import StudioPageTestCase
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import ItemFactory
from xmodule.x_module import STUDENT_VIEW
class UnitPageTestCase(StudioPageTestCase):
    """
    Unit tests for the unit page.
    """
    def setUp(self):
        super(UnitPageTestCase, self).setUp()
        # A unit (vertical) with a single video component inside it.
        self.vertical = ItemFactory.create(parent_location=self.sequential.location,
                                           category='vertical', display_name='Unit')
        self.video = ItemFactory.create(parent_location=self.vertical.location,
                                        category="video", display_name="My Video")
        self.store = modulestore()

    def _create_child_container(self):
        """
        Create a split_test child container (with an html grandchild) under
        the unit and return it.

        Shared by the public/draft child-container tests, which previously
        duplicated this setup inline.
        """
        child_container = ItemFactory.create(parent_location=self.vertical.location,
                                             category='split_test', display_name='Split Test')
        ItemFactory.create(parent_location=child_container.location,
                           category='html', display_name='grandchild')
        return child_container

    def test_public_component_preview_html(self):
        """
        Verify that a public xblock's preview returns the expected HTML.
        """
        # Publish for its side effect of making the block public; the unused
        # binding of the return value has been removed.
        self.store.publish(self.video.location, self.user.id)
        self.validate_preview_html(self.video, STUDENT_VIEW, can_add=False)

    def test_draft_component_preview_html(self):
        """
        Verify that a draft xblock's preview returns the expected HTML.
        """
        self.validate_preview_html(self.video, STUDENT_VIEW, can_add=False)

    def test_public_child_container_preview_html(self):
        """
        Verify that a public child container rendering on the unit page (which shows a View arrow
        to the container page) returns the expected HTML.
        """
        child_container = self._create_child_container()
        published_child_container = self.store.publish(child_container.location, self.user.id)
        self.validate_preview_html(published_child_container, STUDENT_VIEW, can_add=False)

    def test_draft_child_container_preview_html(self):
        """
        Verify that a draft child container rendering on the unit page (which shows a View arrow
        to the container page) returns the expected HTML.
        """
        child_container = self._create_child_container()
        draft_child_container = self.store.get_item(child_container.location)
        self.validate_preview_html(draft_child_container, STUDENT_VIEW, can_add=False)
| agpl-3.0 |
axbaretto/beam | sdks/python/.tox/py27gcp/lib/python2.7/encodings/charmap.py | 860 | 2084 | """ Generic Python Character Mapping Codec.
Use this codec directly rather than through the automatic
conversion mechanisms supplied by unicode() and .encode().
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless charmap codec; both directions take an explicit mapping."""

    # Note: Binding these as C functions will result in the class not
    # converting them to methods. This is intended.
    encode = codecs.charmap_encode
    decode = codecs.charmap_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental charmap encoder parameterised by an encoding map."""

    def __init__(self, errors='strict', mapping=None):
        codecs.IncrementalEncoder.__init__(self, errors)
        self.mapping = mapping

    def encode(self, input, final=False):
        # charmap_encode returns (encoded_bytes, length_consumed); the
        # incremental API only exposes the encoded output.
        encoded, _consumed = codecs.charmap_encode(input, self.errors, self.mapping)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental charmap decoder parameterised by a decoding map."""

    def __init__(self, errors='strict', mapping=None):
        codecs.IncrementalDecoder.__init__(self, errors)
        self.mapping = mapping

    def decode(self, input, final=False):
        # charmap_decode returns (decoded_text, length_consumed); the
        # incremental API only exposes the decoded output.
        decoded, _consumed = codecs.charmap_decode(input, self.errors, self.mapping)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer for the charmap codec, parameterised by a mapping."""

    def __init__(self, stream, errors='strict', mapping=None):
        codecs.StreamWriter.__init__(self, stream, errors)
        self.mapping = mapping

    def encode(self, input, errors='strict'):
        # Delegate to the C-level charmap encoder bound on Codec.
        mapping = self.mapping
        return Codec.encode(input, errors, mapping)
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader for the charmap codec, parameterised by a mapping."""

    def __init__(self, stream, errors='strict', mapping=None):
        codecs.StreamReader.__init__(self, stream, errors)
        self.mapping = mapping

    def decode(self, input, errors='strict'):
        # Delegate to the C-level charmap decoder bound on Codec.
        mapping = self.mapping
        return Codec.decode(input, errors, mapping)
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo registry entry for the 'charmap' codec."""
    entry = codecs.CodecInfo(
        name='charmap',
        encode=Codec.encode,
        decode=Codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
    return entry
| apache-2.0 |
tsuru/varnishapi | tests/test_vcl_writer.py | 1 | 7407 | # Copyright 2014 varnishapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import threading
import time
import unittest
import mock
from feaas import storage
from feaas.runners import vcl_writer
class VCLWriterTestCase(unittest.TestCase):
    """
    Unit tests for feaas.runners.vcl_writer.VCLWriter.

    The storage and manager collaborators are replaced with mocks throughout;
    these tests pin down the exact calls VCLWriter makes against them.
    """

    def test_init(self):
        # The writer takes its storage from the manager and exposes the
        # configured interval/max_items.
        strg = storage.MongoDBStorage()
        manager = mock.Mock(storage=strg)
        writer = vcl_writer.VCLWriter(manager, interval=10, max_items=3)
        self.assertEqual(manager, writer.manager)
        self.assertEqual(strg, writer.storage)
        self.assertEqual(10, writer.interval)
        self.assertEqual(3, writer.max_items)
        # Locking and unlocking both lockers proves they were created usable.
        writer.locker.lock(vcl_writer.UNITS_LOCKER)
        writer.locker.unlock(vcl_writer.UNITS_LOCKER)
        writer.locker.lock(vcl_writer.BINDS_LOCKER)
        writer.locker.unlock(vcl_writer.BINDS_LOCKER)

    def test_loop(self):
        # loop() should call run() repeatedly until stop() is invoked.
        strg = mock.Mock()
        manager = mock.Mock(storage=strg)
        fake_run = mock.Mock()
        writer = vcl_writer.VCLWriter(manager, interval=3, max_items=3)
        writer.run = fake_run
        writer.locker = mock.Mock()
        t = threading.Thread(target=writer.loop)
        t.start()
        # Give the loop thread time for at least one iteration before stopping.
        time.sleep(1)
        writer.stop()
        t.join()
        fake_run.assert_called_once()

    def test_stop(self):
        # stop() simply clears the running flag.
        manager = mock.Mock(storage=mock.Mock())
        writer = vcl_writer.VCLWriter(manager)
        writer.running = True
        writer.stop()
        self.assertFalse(writer.running)

    def test_run(self):
        # run() delegates to the two phase methods, units first then binds.
        manager = mock.Mock(storage=mock.Mock())
        writer = vcl_writer.VCLWriter(manager)
        writer.run_units = mock.Mock()
        writer.run_binds = mock.Mock()
        writer.run()
        writer.run_units.assert_called_once()
        writer.run_binds.assert_called_once()

    def test_run_units(self):
        # Only units that respond on the management port (here: units[1])
        # should be bound and promoted to the "started" state; the whole
        # phase runs under the UNITS_LOCKER.
        units = [storage.Unit(dns_name="instance1.cloud.tsuru.io", id="i-0800"),
                 storage.Unit(dns_name="instance2.cloud.tsuru.io", id="i-0801"),
                 storage.Unit(dns_name="instance3.cloud.tsuru.io", id="i-0802")]
        strg = mock.Mock()
        strg.retrieve_units.return_value = units
        manager = mock.Mock(storage=strg)
        writer = vcl_writer.VCLWriter(manager, max_items=3)
        writer._is_unit_up = lambda unit: unit == units[1]
        writer.bind_units = mock.Mock()
        writer.locker = mock.Mock()
        writer.run_units()
        writer.locker.lock.assert_called_with(vcl_writer.UNITS_LOCKER)
        strg.retrieve_units.assert_called_with(state="creating", limit=3)
        writer.locker.unlock.assert_called_with(vcl_writer.UNITS_LOCKER)
        writer.bind_units.assert_called_with([units[1]])
        strg.update_units.assert_called_with([units[1]], state="started")

    def test_bind_units(self):
        # bind_units should look up the "created" binds of each unit's
        # instance and write one VCL per (unit, bound app) pair.
        instance1 = storage.Instance(name="myinstance")
        instance2 = storage.Instance(name="yourinstance")
        units = [storage.Unit(dns_name="instance1-1.cloud.tsuru.io", id="i-0800",
                              instance=instance1, secret="abc123"),
                 storage.Unit(dns_name="instance1-2.cloud.tsuru.io", id="i-0801",
                              instance=instance1, secret="abc321"),
                 storage.Unit(dns_name="instance2-1.cloud.tsuru.io", id="i-0802",
                              instance=instance2, secret="abc456")]
        strg = mock.Mock()
        strg.retrieve_units.return_value = units
        strg.retrieve_binds.return_value = [storage.Bind("myapp.cloud.tsuru.io",
                                                         instance1)]
        manager = mock.Mock(storage=strg)
        writer = vcl_writer.VCLWriter(manager, max_items=3)
        writer.bind_units(units)
        expected_calls = [mock.call(instance_name="myinstance", state="created"),
                          mock.call(instance_name="yourinstance", state="created")]
        self.assertEqual(expected_calls, strg.retrieve_binds.call_args_list)
        # NOTE(review): the mocked retrieve_binds returns the same bind for
        # both instances, which is why instance2's unit is also written with
        # "myapp.cloud.tsuru.io".
        expected_calls = [mock.call("instance1-1.cloud.tsuru.io", "abc123",
                                    "myapp.cloud.tsuru.io"),
                          mock.call("instance1-2.cloud.tsuru.io", "abc321",
                                    "myapp.cloud.tsuru.io"),
                          mock.call("instance2-1.cloud.tsuru.io", "abc456",
                                    "myapp.cloud.tsuru.io")]
        self.assertEqual(expected_calls, manager.write_vcl.call_args_list)

    @mock.patch("telnetlib.Telnet")
    def test_is_unit_up_up(self, Telnet):
        # A successful Telnet connection to port 6082 (varnish admin port)
        # means the unit is up; the connection must be closed afterwards.
        telnet_client = mock.Mock()
        Telnet.return_value = telnet_client
        unit = storage.Unit(dns_name="instance1.cloud.tsuru.io")
        manager = mock.Mock(storage=mock.Mock())
        writer = vcl_writer.VCLWriter(manager, max_items=3)
        self.assertTrue(writer._is_unit_up(unit))
        Telnet.assert_called_with(unit.dns_name, "6082", timeout=3)
        telnet_client.close.assert_called_once()

    @mock.patch("telnetlib.Telnet")
    def test_is_unit_up_down(self, Telnet):
        # Any exception from Telnet is interpreted as "unit is down".
        Telnet.side_effect = ValueError()
        unit = storage.Unit(dns_name="instance1.cloud.tsuru.io")
        manager = mock.Mock(storage=storage.MongoDBStorage())
        writer = vcl_writer.VCLWriter(manager, max_items=3)
        self.assertFalse(writer._is_unit_up(unit))
        Telnet.assert_called_with(unit.dns_name, "6082", timeout=3)

    def test_run_binds(self):
        # run_binds should, under BINDS_LOCKER: fetch the "creating" binds,
        # fetch the started units of the bound instances, write a VCL for
        # every (unit, bind) pair and mark each bind "created".
        units = [storage.Unit(id="i-0800", dns_name="unit1.cloud.tsuru.io",
                              secret="abc123", state="started"),
                 storage.Unit(id="i-8001", dns_name="unit2.cloud.tsuru.io",
                              secret="abc321", state="started")]
        instance1 = storage.Instance(name="wat", units=units)
        instance2 = storage.Instance(name="wet", units=units)
        binds = [storage.Bind(instance=instance1, app_host="cool", state="creating"),
                 storage.Bind(instance=instance2, app_host="bool", state="creating")]
        strg = mock.Mock()
        strg.retrieve_units.return_value = units
        strg.retrieve_binds.return_value = binds
        manager = mock.Mock(storage=strg)
        writer = vcl_writer.VCLWriter(manager, max_items=3)
        writer.locker = mock.Mock()
        writer.run_binds()
        writer.locker.lock.assert_called_with(vcl_writer.BINDS_LOCKER)
        writer.locker.unlock.assert_called_with(vcl_writer.BINDS_LOCKER)
        strg.retrieve_units.assert_called_once_with(state="started",
                                                    instance_name={"$in": ["wat", "wet"]})
        strg.retrieve_binds.assert_called_once_with(state="creating", limit=3)
        expected_write_vcl_calls = [mock.call("unit1.cloud.tsuru.io", "abc123", "cool"),
                                    mock.call("unit2.cloud.tsuru.io", "abc321", "cool"),
                                    mock.call("unit1.cloud.tsuru.io", "abc123", "bool"),
                                    mock.call("unit2.cloud.tsuru.io", "abc321", "bool")]
        self.assertEqual(expected_write_vcl_calls, manager.write_vcl.call_args_list)
        expected_update_bind_calls = [mock.call(binds[0], state="created"),
                                      mock.call(binds[1], state="created")]
        self.assertEqual(expected_update_bind_calls, strg.update_bind.call_args_list)
| bsd-3-clause |
jmschrei/scikit-learn | examples/manifold/plot_mds.py | 45 | 2731 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.Blues,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
jazkarta/edx-platform | common/djangoapps/track/middleware.py | 37 | 7887 | """
This is a middleware layer which keeps a log of all requests made
to the server. It is responsible for removing security tokens and
similar from such events, and relaying them to the event tracking
framework.
"""
import hashlib
import hmac
import json
import logging
import re
import sys
from django.conf import settings
from track import views
from track import contexts
from eventtracking import tracker
log = logging.getLogger(__name__)
# Name under which the per-request context is registered with the event tracker.
CONTEXT_NAME = 'edx.request'

# Maps WSGI META header names to the keys used in the tracking context.
META_KEY_TO_CONTEXT_KEY = {
    'REMOTE_ADDR': 'ip',
    'SERVER_NAME': 'host',
    'HTTP_USER_AGENT': 'agent',
    'PATH_INFO': 'path',
    # Not a typo. See:
    # http://en.wikipedia.org/wiki/HTTP_referer#Origin_of_the_term_referer
    'HTTP_REFERER': 'referer',
    'HTTP_ACCEPT_LANGUAGE': 'accept_language',
}
class TrackMiddleware(object):
    """
    Tracks all requests made, as well as setting up context for other server
    emitted events.
    """

    def process_request(self, request):
        """
        Enter the tracking context for this request and, unless the URL is
        ignored, emit a server_track event describing the (censored) request.
        """
        try:
            self.enter_request_context(request)

            if not self.should_process_request(request):
                return

            # Removes passwords from the tracking logs
            # WARNING: This list needs to be changed whenever we change
            # password handling functionality.
            #
            # As of the time of this comment, only 'password' is used
            # The rest are there for future extension.
            #
            # Passwords should never be sent as GET requests, but
            # this can happen due to older browser bugs. We censor
            # this too.
            #
            # We should manually confirm no passwords make it into log
            # files when we change this.
            censored_strings = ['password', 'newpassword', 'new_password',
                                'oldpassword', 'old_password', 'new_password1', 'new_password2']
            post_dict = dict(request.POST)
            get_dict = dict(request.GET)
            for string in censored_strings:
                if string in post_dict:
                    post_dict[string] = '*' * 8
                if string in get_dict:
                    get_dict[string] = '*' * 8

            event = {
                'GET': dict(get_dict),
                'POST': dict(post_dict),
            }

            # TODO: Confirm no large file uploads
            # Serialize and truncate so one huge request cannot bloat the log.
            event = json.dumps(event)
            event = event[:512]

            views.server_track(request, request.META['PATH_INFO'], event)
        except:
            ## Why do we have the overly broad except?
            ##
            ## I added instrumentation so if we drop events on the
            ## floor, we at least know about it. However, we really
            ## should just return a 500 here: (1) This will translate
            ## to much more insidious user-facing bugs if we make any
            ## decisions based on incorrect data. (2) If the system
            ## is down, we should fail and fix it.
            event = {'event-type': 'exception', 'exception': repr(sys.exc_info()[0])}
            try:
                views.server_track(request, request.META['PATH_INFO'], event)
            except:
                # At this point, things are really broken. We really
                # should fail return a 500 to the user here.  However,
                # the interim decision is to just fail in order to be
                # consistent with current policy, and expedite the PR.
                # This version of the code makes no compromises
                # relative to the code before, while a proper failure
                # here would involve shifting compromises and
                # discussion.
                pass

    def should_process_request(self, request):
        """Don't track requests to the specified URL patterns"""
        path = request.META['PATH_INFO']

        ignored_url_patterns = getattr(settings, 'TRACKING_IGNORE_URL_PATTERNS', [])
        for pattern in ignored_url_patterns:
            # Note we are explicitly relying on python's internal caching of
            # compiled regular expressions here.
            if re.match(pattern, path):
                return False
        return True

    def enter_request_context(self, request):
        """
        Extract information from the request and add it to the tracking
        context.

        The following fields are injected into the context:

        * session - The Django session key that identifies the user's session.
        * user_id - The numeric ID for the logged in user.
        * username - The username of the logged in user.
        * ip - The IP address of the client.
        * host - The "SERVER_NAME" header, which should be the name of the server running this code.
        * agent - The client browser identification string.
        * path - The path part of the requested URL.
        * client_id - The unique key used by Google Analytics to identify a user
        """
        context = {
            'session': self.get_session_key(request),
            'user_id': self.get_user_primary_key(request),
            'username': self.get_username(request),
        }
        # Copy the whitelisted META headers, defaulting to '' when absent.
        for header_name, context_key in META_KEY_TO_CONTEXT_KEY.iteritems():
            context[context_key] = request.META.get(header_name, '')

        # Google Analytics uses the clientId to keep track of unique visitors. A GA cookie looks like
        # this: _ga=GA1.2.1033501218.1368477899. The clientId is this part: 1033501218.1368477899.
        google_analytics_cookie = request.COOKIES.get('_ga')
        if google_analytics_cookie is None:
            context['client_id'] = None
        else:
            context['client_id'] = '.'.join(google_analytics_cookie.split('.')[2:])

        context.update(contexts.course_context_from_url(request.build_absolute_uri()))

        tracker.get_tracker().enter_context(
            CONTEXT_NAME,
            context
        )

    def get_session_key(self, request):
        """ Gets and encrypts the Django session key from the request or an empty string if it isn't found."""
        try:
            return self.encrypt_session_key(request.session.session_key)
        except AttributeError:
            return ''

    def encrypt_session_key(self, session_key):
        """Encrypts a Django session key to another 32-character hex value."""
        if not session_key:
            return ''

        # Follow the model of django.utils.crypto.salted_hmac() and
        # django.contrib.sessions.backends.base._hash() but use MD5
        # instead of SHA1 so that the result has the same length (32)
        # as the original session_key.
        # TODO: Switch to SHA224, which is secure.
        #   If necessary, drop the last little bit of the hash to make it the same length.
        #   Using a known-insecure hash to shorten is silly.
        #   Also, why do we need same length?
        key_salt = "common.djangoapps.track" + self.__class__.__name__
        key = hashlib.md5(key_salt + settings.SECRET_KEY).digest()
        encrypted_session_key = hmac.new(key, msg=session_key, digestmod=hashlib.md5).hexdigest()
        return encrypted_session_key

    def get_user_primary_key(self, request):
        """Gets the primary key of the logged in Django user"""
        try:
            return request.user.pk
        except AttributeError:
            return ''

    def get_username(self, request):
        """Gets the username of the logged in Django user"""
        try:
            return request.user.username
        except AttributeError:
            return ''

    def process_response(self, _request, response):
        """Exit the context if it exists."""
        try:
            tracker.get_tracker().exit_context(CONTEXT_NAME)
        except Exception:  # pylint: disable=broad-except
            pass

        return response
| agpl-3.0 |
nixonpjoshua/JNixonHelloContigsFilter | scripts/prepare_deploy_cfg.py | 8 | 2151 | import sys
import os
import os.path
from jinja2 import Template
from ConfigParser import ConfigParser
import StringIO
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: <program> <deploy_cfg_template_file> <file_with_properties>")
print("Properties from <file_with_properties> will be applied to <deploy_cfg_template_file>")
print("template which will be overwritten with .orig copy saved in the same folder first.")
sys.exit(1)
file = open(sys.argv[1], 'r')
text = file.read()
t = Template(text)
config = ConfigParser()
if os.path.isfile(sys.argv[2]):
config.read(sys.argv[2])
elif "KBASE_ENDPOINT" in os.environ:
kbase_endpoint = os.environ.get("KBASE_ENDPOINT")
props = "[global]\n" + \
"kbase_endpoint = " + kbase_endpoint + "\n" + \
"job_service_url = " + kbase_endpoint + "/userandjobstate\n" + \
"workspace_url = " + kbase_endpoint + "/ws\n" + \
"shock_url = " + kbase_endpoint + "/shock-api\n" + \
"handle_url = " + kbase_endpoint + "/handle_service\n" + \
"srv_wiz_url = " + kbase_endpoint + "/service_wizard\n" + \
"njsw_url = " + kbase_endpoint + "/njs_wrapper\n"
if "AUTH_SERVICE_URL" in os.environ:
props += "auth_service_url = " + os.environ.get("AUTH_SERVICE_URL") + "\n"
props += "auth_service_url_allow_insecure = " + \
os.environ.get("AUTH_SERVICE_URL_ALLOW_INSECURE", "false") + "\n"
for key in os.environ:
if key.startswith('KBASE_SECURE_CONFIG_PARAM_'):
param_name = key[len('KBASE_SECURE_CONFIG_PARAM_'):]
props += param_name + " = " + os.environ.get(key) + "\n"
config.readfp(StringIO.StringIO(props))
else:
raise ValueError('Neither ' + sys.argv[2] + ' file nor KBASE_ENDPOINT env-variable found')
props = dict(config.items("global"))
output = t.render(props)
with open(sys.argv[1] + ".orig", 'w') as f:
f.write(text)
with open(sys.argv[1], 'w') as f:
f.write(output)
| mit |
halvorlu/rbtools | rbtools/clients/plastic.py | 5 | 10406 | import logging
import os
import re
from rbtools.clients import SCMClient, RepositoryInfo
from rbtools.clients.errors import (InvalidRevisionSpecError,
TooManyRevisionsError)
from rbtools.utils.checks import check_install
from rbtools.utils.filesystem import make_tempfile
from rbtools.utils.process import die, execute
class PlasticClient(SCMClient):
    """
    A wrapper around the cm Plastic tool that fetches repository
    information and generates compatible diffs
    """
    name = 'Plastic'
    supports_patch_revert = True

    # Revisions beginning with this prefix are treated as changeset numbers;
    # anything else is treated as a branch name.
    REVISION_CHANGESET_PREFIX = 'cs:'

    def __init__(self, **kwargs):
        super(PlasticClient, self).__init__(**kwargs)

    def get_repository_info(self):
        """
        Return a RepositoryInfo for the enclosing Plastic workspace, or None
        if the "cm" tool is unavailable or no repository can be determined.
        """
        if not check_install(['cm', 'version']):
            logging.debug('Unable to execute "cm version": skipping Plastic')
            return None

        # Get the workspace directory, so we can strip it from the diff output
        self.workspacedir = execute(["cm", "gwp", ".", "--format={1}"],
                                    split_lines=False,
                                    ignore_errors=True).strip()
        logging.debug("Workspace is %s" % self.workspacedir)

        # Get the repository that the current directory is from
        split = execute(["cm", "ls", self.workspacedir, "--format={8}"],
                        split_lines=True, ignore_errors=True)

        # remove blank lines
        split = [x for x in split if x]

        m = re.search(r'^rep:(.+)$', split[0], re.M)
        if not m:
            return None

        path = m.group(1)

        return RepositoryInfo(path,
                              supports_changesets=True,
                              supports_parent_diffs=False)

    def parse_revision_spec(self, revisions=[]):
        """Parses the given revision spec.

        The 'revisions' argument is a list of revisions as specified by the
        user. Items in the list do not necessarily represent a single revision,
        since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2".
        SCMTool-specific overrides of this method are expected to deal with
        such syntaxes.

        This will return a dictionary with the following keys:
            'base': Always None.
            'tip':  A revision string representing either a changeset or a
                    branch.

        These will be used to generate the diffs to upload to Review Board (or
        print). The Plastic implementation requires that one and only one
        revision is passed in. The diff for review will include the changes in
        the given changeset or branch.
        """
        # NOTE(review): the default [] is a mutable default argument; it is
        # never mutated here, but consider revisions=None for safety.
        n_revisions = len(revisions)

        if n_revisions == 0:
            raise InvalidRevisionSpecError(
                'Either a changeset or a branch must be specified')
        elif n_revisions == 1:
            return {
                'base': None,
                'tip': revisions[0],
            }
        else:
            raise TooManyRevisionsError

    def diff(self, revisions, include_files=[], exclude_patterns=[],
             extra_args=[]):
        """
        Performs a diff across all modified files in a Plastic workspace

        Parent diffs are not supported (the second value in the tuple).
        """
        # TODO: use 'files'
        changenum = None
        tip = revisions['tip']
        if tip.startswith(self.REVISION_CHANGESET_PREFIX):
            logging.debug('Doing a diff against changeset %s', tip)
            try:
                # Normalize "cs:<n>" to the plain changeset number string.
                changenum = str(int(
                    tip[len(self.REVISION_CHANGESET_PREFIX):]))
            except ValueError:
                pass
        else:
            logging.debug('Doing a diff against branch %s', tip)
            if not getattr(self.options, 'branch', None):
                self.options.branch = tip

        # Ask "cm diff" for one machine-parseable line per changed file.
        diff_entries = execute(
            ['cm', 'diff', tip, '--format={status} {path} rev:revid:{revid} '
                                'rev:revid:{parentrevid} src:{srccmpath} '
                                'dst:{dstcmpath}{newline}'],
            split_lines=True)

        logging.debug('Got files: %s', diff_entries)

        diff = self._process_diffs(diff_entries)

        return {
            'diff': diff,
            'changenum': changenum,
        }

    def _process_diffs(self, my_diff_entries):
        # Diff generation based on perforce client
        diff_lines = []

        empty_filename = make_tempfile()
        tmp_diff_from_filename = make_tempfile()
        tmp_diff_to_filename = make_tempfile()

        for f in my_diff_entries:
            f = f.strip()

            if not f:
                continue

            m = re.search(r'(?P<type>[ACMD]) (?P<file>.*) '
                          r'(?P<revspec>rev:revid:[-\d]+) '
                          r'(?P<parentrevspec>rev:revid:[-\d]+) '
                          r'src:(?P<srcpath>.*) '
                          r'dst:(?P<dstpath>.*)$',
                          f)
            if not m:
                die("Could not parse 'cm log' response: %s" % f)

            changetype = m.group("type")
            filename = m.group("file")

            if changetype == "M":
                # Handle moved files as a delete followed by an add.
                # Clunky, but at least it works
                oldfilename = m.group("srcpath")
                oldspec = m.group("revspec")
                newfilename = m.group("dstpath")
                newspec = m.group("revspec")

                self._write_file(oldfilename, oldspec, tmp_diff_from_filename)
                dl = self._diff_files(tmp_diff_from_filename, empty_filename,
                                      oldfilename, "rev:revid:-1", oldspec,
                                      changetype)
                diff_lines += dl

                self._write_file(newfilename, newspec, tmp_diff_to_filename)
                dl = self._diff_files(empty_filename, tmp_diff_to_filename,
                                      newfilename, newspec, "rev:revid:-1",
                                      changetype)
                diff_lines += dl
            else:
                newrevspec = m.group("revspec")
                parentrevspec = m.group("parentrevspec")

                logging.debug("Type %s File %s Old %s New %s"
                              % (changetype, filename, parentrevspec,
                                 newrevspec))

                old_file = new_file = empty_filename

                if (changetype in ['A'] or
                    (changetype in ['C'] and parentrevspec == "rev:revid:-1")):
                    # There's only one content to show
                    self._write_file(filename, newrevspec,
                                     tmp_diff_to_filename)
                    new_file = tmp_diff_to_filename
                elif changetype in ['C']:
                    # Changed file: diff parent revision against new revision.
                    self._write_file(filename, parentrevspec,
                                     tmp_diff_from_filename)
                    old_file = tmp_diff_from_filename
                    self._write_file(filename, newrevspec,
                                     tmp_diff_to_filename)
                    new_file = tmp_diff_to_filename
                elif changetype in ['D']:
                    # Deleted file: diff parent revision against empty file.
                    self._write_file(filename, parentrevspec,
                                     tmp_diff_from_filename)
                    old_file = tmp_diff_from_filename
                else:
                    die("Don't know how to handle change type '%s' for %s" %
                        (changetype, filename))

                dl = self._diff_files(old_file, new_file, filename,
                                      newrevspec, parentrevspec, changetype)
                diff_lines += dl

        os.unlink(empty_filename)
        os.unlink(tmp_diff_from_filename)
        os.unlink(tmp_diff_to_filename)

        return ''.join(diff_lines)

    def _diff_files(self, old_file, new_file, filename, newrevspec,
                    parentrevspec, changetype):
        """
        Do the work of producing a diff for Plastic (based on the Perforce one)

        old_file - The absolute path to the "old" file.
        new_file - The absolute path to the "new" file.
        filename - The file in the Plastic workspace
        newrevspec - The revid spec of the changed file
        parentrevspecspec - The revision spec of the "old" file
        changetype - The change type as a single character string

        Returns a list of strings of diff lines.
        """
        if filename.startswith(self.workspacedir):
            filename = filename[len(self.workspacedir):]

        diff_cmd = ["diff", "-urN", old_file, new_file]
        # Diff returns "1" if differences were found.
        dl = execute(diff_cmd, extra_ignore_errors=(1, 2),
                     translate_newlines = False)

        # If the input file has ^M characters at end of line, lets ignore them.
        dl = dl.replace('\r\r\n', '\r\n')
        dl = dl.splitlines(True)

        # Special handling for the output of the diff tool on binary files:
        #     diff outputs "Files a and b differ"
        # and the code below expects the output to start with
        #     "Binary files "
        if (len(dl) == 1 and
            dl[0].startswith('Files %s and %s differ' % (old_file, new_file))):
            dl = ['Binary files %s and %s differ\n' % (old_file, new_file)]

        if dl == [] or dl[0].startswith("Binary files "):
            if dl == []:
                return []

            dl.insert(0, "==== %s (%s) ==%s==\n" % (filename, newrevspec,
                                                    changetype))
            dl.append('\n')
        else:
            # Rewrite the temp-file headers emitted by diff into
            # workspace-relative filenames with revision specs.
            dl[0] = "--- %s\t%s\n" % (filename, parentrevspec)
            dl[1] = "+++ %s\t%s\n" % (filename, newrevspec)

            # Not everybody has files that end in a newline.  This ensures
            # that the resulting diff file isn't broken.
            if dl[-1][-1] != '\n':
                dl.append('\n')

        return dl

    def _write_file(self, filename, filespec, tmpfile):
        """ Grabs a file from Plastic and writes it to a temp file """
        logging.debug("Writing '%s' (rev %s) to '%s'"
                      % (filename, filespec, tmpfile))
        execute(["cm", "cat", filespec, "--file=" + tmpfile])
| mit |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.3/Lib/lib-old/tb.py | 5 | 4072 | # Print tracebacks, with a dump of local variables.
# Also an interactive stack trace browser.
# Note -- this module is obsolete -- use pdb.pm() instead.
import sys
import os
from stat import *
import linecache
# Browse the traceback of the last uncaught exception interactively.
def br(): browser(sys.last_traceback)

# Print the traceback of the last uncaught exception, with locals.
def tb(): printtb(sys.last_traceback)

def browser(tb):
    # Interactive stack-trace browser: walks the linked list of traceback
    # frames and executes simple commands read from stdin.
    if not tb:
        print 'No traceback.'
        return
    tblist = []
    while tb:
        tblist.append(tb)
        tb = tb.tb_next
    # ptr indexes the currently selected frame; start at the innermost one.
    ptr = len(tblist)-1
    tb = tblist[ptr]
    while 1:
        if tb != tblist[ptr]:
            tb = tblist[ptr]
        print `ptr` + ':',
        printtbheader(tb)
        try:
            line = raw_input('TB: ')
        except KeyboardInterrupt:
            print '\n[Interrupted]'
            break
        except EOFError:
            print '\n[EOF]'
            break
        cmd = line.strip()
        if cmd:
            if cmd == 'quit':
                break
            elif cmd == 'list':
                browserlist(tb)
            elif cmd == 'up':
                if ptr-1 >= 0: ptr = ptr-1
                else: print 'Bottom of stack.'
            elif cmd == 'down':
                if ptr+1 < len(tblist): ptr = ptr+1
                else: print 'Top of stack.'
            elif cmd == 'locals':
                printsymbols(tb.tb_frame.f_locals)
            elif cmd == 'globals':
                printsymbols(tb.tb_frame.f_globals)
            elif cmd in ('?', 'help'):
                browserhelp()
            else:
                # Anything else is executed as a statement in the frame's
                # namespaces (see browserexec).
                browserexec(tb, cmd)
def browserlist(tb):
    # Show up to 11 source lines around the failing line, marking the
    # failure line itself with '***'.
    filename = tb.tb_frame.f_code.co_filename
    lineno = tb.tb_lineno
    last = lineno
    first = max(1, last-10)
    for i in range(first, last+1):
        if i == lineno: prefix = '***' + `i`.rjust(4) + ':'
        else: prefix = `i`.rjust(7) + ':'
        line = linecache.getline(filename, i)
        if line[-1:] == '\n': line = line[:-1]
        print prefix + line

def browserexec(tb, cmd):
    # Execute a one-line statement using the selected frame's local and
    # global namespaces; report (but swallow) any exception it raises.
    locals = tb.tb_frame.f_locals
    globals = tb.tb_frame.f_globals
    try:
        exec cmd+'\n' in globals, locals
    except:
        t, v = sys.exc_info()[:2]
        print '*** Exception:',
        if type(t) is type(''):
            # Python 1.x-era string exceptions.
            print t,
        else:
            print t.__name__,
        if v is not None:
            print ':', v,
        print
        print 'Type help to get help.'

def browserhelp():
    # Print the command summary for the interactive browser.
    print
    print '    This is the traceback browser.  Commands are:'
    print '        up      : move one level up in the call stack'
    print '        down    : move one level down in the call stack'
    print '        locals  : print all local variables at this level'
    print '        globals : print all global variables at this level'
    print '        list    : list source code around the failure'
    print '        help    : print help (what you are reading now)'
    print '        quit    : back to command interpreter'
    print '    Typing any other 1-line statement will execute it'
    print '    using the current level\'s symbol tables'
    print
def printtb(tb):
    # Print every frame of the traceback, innermost last.
    while tb:
        print1tb(tb)
        tb = tb.tb_next

def print1tb(tb):
    # Print one frame's header, plus its locals unless we're at module level
    # (where locals and globals are the same dict).
    printtbheader(tb)
    if tb.tb_frame.f_locals is not tb.tb_frame.f_globals:
        printsymbols(tb.tb_frame.f_locals)

def printtbheader(tb):
    # One-line summary: "file"(lineno): source-line.
    filename = tb.tb_frame.f_code.co_filename
    lineno = tb.tb_lineno
    info = '"' + filename + '"(' + `lineno` + ')'
    line = linecache.getline(filename, lineno)
    if line:
        info = info + ': ' + line.strip()
    print info
def printsymbols(d):
keys = d.keys()
keys.sort()
for name in keys:
print ' ' + name.ljust(12) + ':',
printobject(d[name], 4)
print
def printobject(v, maxlevel):
    # Print an abbreviated repr of ``v`` without a trailing newline (the
    # trailing commas keep Python 2's print on the same line).  Containers
    # recurse at most ``maxlevel`` levels; strings longer than 20 chars are
    # truncated to 17 plus '...'.
    if v is None:
        print 'None',
    elif type(v) in (type(0), type(0.0)):
        print v,
    elif type(v) is type(''):
        if len(v) > 20:
            print `v[:17] + '...'`,
        else:
            print `v`,
    elif type(v) is type(()):
        print '(',
        printlist(v, maxlevel)
        print ')',
    elif type(v) is type([]):
        print '[',
        printlist(v, maxlevel)
        print ']',
    elif type(v) is type({}):
        print '{',
        printdict(v, maxlevel)
        print '}',
    else:
        print v,
def printlist(v, maxlevel):
    # Print at most the first 6 items of sequence ``v``, comma separated,
    # recursing through printobject with one less level of allowed nesting;
    # '...' marks truncation (either by depth or by length).
    n = len(v)
    if n == 0: return
    if maxlevel <= 0:
        print '...',
        return
    for i in range(min(6, n)):
        printobject(v[i], maxlevel-1)
        if i+1 < n: print ',',
    if n > 6: print '...',
def printdict(v, maxlevel):
    # Print at most the first 6 key:value pairs of dict ``v`` in sorted key
    # order, comma separated, recursing through printobject; '...' marks
    # truncation (either by depth or by size).
    keys = v.keys()
    n = len(keys)
    if n == 0: return
    if maxlevel <= 0:
        print '...',
        return
    keys.sort()
    for i in range(min(6, n)):
        key = keys[i]
        print `key` + ':',
        printobject(v[key], maxlevel-1)
        if i+1 < n: print ',',
    if n > 6: print '...',
| mit |
10clouds/edx-platform | openedx/core/djangolib/js_utils.py | 13 | 3536 | """
Utilities for dealing with Javascript and JSON.
"""
import json
from django.utils.html import escapejs
from mako.filters import decode
from markupsafe import escape
from xmodule.modulestore import EdxJSONEncoder
def _escape_json_for_js(json_dumps_string):
"""
Escape output of JSON dumps that is safe to be embedded in a <SCRIPT> tag.
This implementation is based on escaping performed in
simplejson.JSONEncoderForHTML.
Arguments:
json_dumps_string (string): A JSON string to be escaped.
This must be the output of json.dumps to ensure:
1. The string contains valid JSON, and
2. That non-ascii characters are properly escaped
Returns:
(string) Escaped JSON that is safe to be embedded in HTML.
"""
json_dumps_string = json_dumps_string.replace("&", "\\u0026")
json_dumps_string = json_dumps_string.replace(">", "\\u003e")
json_dumps_string = json_dumps_string.replace("<", "\\u003c")
return json_dumps_string
def dump_js_escaped_json(obj, cls=EdxJSONEncoder):
    """
    JSON dumps and escapes objects that are safe to be embedded in JavaScript.

    Use this for anything but strings (e.g. dicts, tuples, lists, bools, and
    numbers). For strings, use js_escaped_string.

    The output of this method is also usable as plain-old JSON.

    Usage:
        Used as follows in a Mako template inside a <SCRIPT> tag::

            var json_obj = ${obj | n, dump_js_escaped_json}

        If you must use the cls argument, then use as follows::

            var json_obj = ${dump_js_escaped_json(obj, cls) | n}

        Use the "n" Mako filter above. It is possible that the default filter
        may include html escaping in the future, and this ensures proper
        escaping.

        Ensure ascii in json.dumps (ensure_ascii=True) allows safe skipping of
        Mako's default filter decode.utf8.

    Arguments:
        obj: The object soon to become a JavaScript escaped JSON string. The
            object can be anything but strings (e.g. dicts, tuples, lists,
            bools, and numbers).
        cls (class): The JSON encoder class (defaults to EdxJSONEncoder).

    Returns:
        (string) Escaped encoded JSON.
    """
    # ensure_ascii=True guarantees pure-ASCII output; the result then only
    # needs HTML-significant characters escaped before <script> embedding.
    encoded = json.dumps(obj, ensure_ascii=True, cls=cls)
    return _escape_json_for_js(encoded)
def js_escaped_string(string_for_js):
    """
    Mako filter that escapes text for use in a JavaScript string.

    If None is provided, returns an empty string.

    Usage:
        Used as follows in a Mako template inside a <SCRIPT> tag::

            var my_string_for_js = "${my_string_for_js | n, js_escaped_string}"

        The surrounding quotes for the string must be included.

        Use the "n" Mako filter above. It is possible that the default filter
        may include html escaping in the future, and this ensures proper
        escaping.

        Mako's default filter decode.utf8 is applied here since this default
        filter is skipped in the Mako template with "n".

    Arguments:
        string_for_js (string): Text to be properly escaped for use in a
            JavaScript string.

    Returns:
        (string) Text properly escaped for use in a JavaScript string as
        unicode. Returns empty string if argument is None.
    """
    # Treat None as empty, normalize to unicode exactly as Mako's default
    # decode.utf8 filter would, then escape for a JS string context.
    text = "" if string_for_js is None else string_for_js
    return escapejs(decode.utf8(text))
| agpl-3.0 |
Openlights/firemix | plugins/dissolve.py | 1 | 1064 | # This file is part of Firemix.
#
# Copyright 2013-2016 Jonathan Evans <jon@craftyjon.com>
#
# Firemix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Firemix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Firemix. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from lib.colors import hls_blend
from lib.transition import Transition
class Dissolve(Transition):
    """Transition that blends the start and end frames with hls_blend in 'add' mode."""

    def __init__(self, app):
        super(Dissolve, self).__init__(app)

    def __str__(self):
        return "Dissolve"

    def render(self, start, end, progress, out):
        # Write the blended frame into ``out``, weighted by ``progress``.
        hls_blend(start, end, out, progress, 'add', 1.0, 1.0)
| gpl-3.0 |
StackStorm/st2 | st2common/tests/unit/test_logging.py | 3 | 1850 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import unittest2
from st2common.logging.misc import get_logger_name_for_module
from st2reactor.cmd import sensormanager
from python_runner import python_runner
from st2common import runners
__all__ = ["LoggingMiscUtilsTestCase"]
class LoggingMiscUtilsTestCase(unittest2.TestCase):
    """Tests for st2common.logging.misc.get_logger_name_for_module."""

    def test_get_logger_name_for_module(self):
        # A module inside a package resolves to its full dotted path.
        logger_name = get_logger_name_for_module(sensormanager)
        self.assertEqual(logger_name, "st2reactor.cmd.sensormanager")
        # endswith() is used here, presumably because the leading path
        # components depend on the checkout location -- confirm.
        logger_name = get_logger_name_for_module(python_runner)
        result = logger_name.endswith(
            "contrib.runners.python_runner.python_runner.python_runner"
        )
        self.assertTrue(result)
        # exclude_module_name=True drops the trailing module component.
        logger_name = get_logger_name_for_module(
            python_runner, exclude_module_name=True
        )
        self.assertTrue(
            logger_name.endswith("contrib.runners.python_runner.python_runner")
        )
        # A package resolves to its __init__ module unless excluded.
        logger_name = get_logger_name_for_module(runners)
        self.assertEqual(logger_name, "st2common.runners.__init__")
        logger_name = get_logger_name_for_module(runners, exclude_module_name=True)
        self.assertEqual(logger_name, "st2common.runners")
| apache-2.0 |
chrismeyersfsu/ansible | lib/ansible/modules/cloud/amazon/ecs_cluster.py | 48 | 8158 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ecs_cluster
short_description: create or terminate ecs clusters
notes:
- When deleting a cluster, the information returned is the state of the cluster prior to deletion.
- It will also wait for a cluster to have instances registered to it.
description:
- Creates or terminates ecs clusters.
version_added: "2.0"
author: Mark Chance(@Java1Guy)
requirements: [ boto, boto3 ]
options:
state:
description:
- The desired state of the cluster
required: true
choices: ['present', 'absent', 'has_instances']
name:
description:
- The cluster name
required: true
delay:
description:
- Number of seconds to wait
required: false
repeat:
description:
- The number of times to wait for the cluster to have an instance
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Cluster creation
- ecs_cluster:
name: default
state: present
# Cluster deletion
- ecs_cluster:
name: default
state: absent
- name: Wait for register
ecs_cluster:
name: "{{ new_cluster }}"
state: has_instances
delay: 10
repeat: 10
register: task_output
'''
RETURN = '''
activeServicesCount:
description: how many services are active in this cluster
returned: 0 if a new cluster
type: int
clusterArn:
description: the ARN of the cluster just created
type: string (ARN)
sample: arn:aws:ecs:us-west-2:172139249013:cluster/test-cluster-mfshcdok
clusterName:
description: name of the cluster just created (should match the input argument)
type: string
sample: test-cluster-mfshcdok
pendingTasksCount:
description: how many tasks are waiting to run in this cluster
returned: 0 if a new cluster
type: int
registeredContainerInstancesCount:
description: how many container instances are available in this cluster
returned: 0 if a new cluster
type: int
runningTasksCount:
description: how many tasks are running in this cluster
returned: 0 if a new cluster
type: int
status:
description: the status of the new cluster
returned: ACTIVE
type: string
'''
import time
try:
import boto
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
class EcsClusterManager:
    """Thin wrapper around the boto3 ECS client for cluster operations."""

    def __init__(self, module):
        """Build the boto3 ECS client from the Ansible module's AWS parameters."""
        self.module = module
        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except boto.exception.NoAuthHandlerFound as e:
            self.module.fail_json(msg="Can't authorize connection - %s" % str(e))

    def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'):
        """Return the first entry whose ``field_name`` value ends with
        ``cluster_name``, or None when nothing matches."""
        matches = (entry for entry in array_of_clusters
                   if entry[field_name].endswith(cluster_name))
        return next(matches, None)

    def describe_cluster(self, cluster_name):
        """Describe one cluster: return its dict, or None if it is missing."""
        response = self.ecs.describe_clusters(clusters=[
            cluster_name
        ])
        failures = response['failures']
        if len(failures) > 0:
            failure = self.find_in_array(failures, cluster_name, 'arn')
            if failure and failure['reason'] == 'MISSING':
                return None
            # fall thru and look through found ones
        clusters = response['clusters']
        if len(clusters) > 0:
            match = self.find_in_array(clusters, cluster_name)
            if match:
                return match
        raise Exception("Unknown problem describing cluster %s." % cluster_name)

    def create_cluster(self, clusterName='default'):
        """Create the cluster and return its description dict."""
        response = self.ecs.create_cluster(clusterName=clusterName)
        return response['cluster']

    def delete_cluster(self, clusterName):
        """Delete the cluster and return the raw API response."""
        return self.ecs.delete_cluster(cluster=clusterName)
def main():
    """Entry point for the ecs_cluster module.

    Depending on ``state``: create the cluster ('present'), delete it
    ('absent'), or poll until it has at least one registered container
    instance ('has_instances').  Exits via module.exit_json/fail_json.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent', 'has_instances'] ),
        name=dict(required=True, type='str' ),
        delay=dict(required=False, type='int', default=10),
        repeat=dict(required=False, type='int', default=10)
    ))
    # BUG FIX: required_together must be a list of lists of parameter names;
    # the original (['state', 'name']) was just a flat list -- the extra
    # parentheses do not create a tuple of lists.
    required_together = [['state', 'name']]
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    cluster_mgr = EcsClusterManager(module)
    try:
        existing = cluster_mgr.describe_cluster(module.params['name'])
    except Exception as e:
        module.fail_json(msg="Exception describing cluster '"+module.params['name']+"': "+str(e))

    results = dict(changed=False)
    if module.params['state'] == 'present':
        if existing and 'status' in existing and existing['status']=="ACTIVE":
            # Already present and active: report it, nothing to change.
            results['cluster']=existing
        else:
            if not module.check_mode:
                # doesn't exist. create it.
                results['cluster'] = cluster_mgr.create_cluster(module.params['name'])
            results['changed'] = True

    # delete the cluster
    elif module.params['state'] == 'absent':
        if not existing:
            pass
        else:
            # it exists, so we should delete it and mark changed.
            # return info about the cluster deleted
            results['cluster'] = existing
            if 'status' in existing and existing['status']=="INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    cluster_mgr.delete_cluster(module.params['name'])
                results['changed'] = True
    elif module.params['state'] == 'has_instances':
        if not existing:
            # NOTE(review): message is missing the closing quote around the
            # cluster name; kept as-is to avoid changing user-facing output.
            module.fail_json(msg="Cluster '"+module.params['name']+" not found.")
            return
        # Poll until the cluster reports at least one registered container
        # instance, or the retry budget is exhausted.
        delay = module.params['delay']
        repeat = module.params['repeat']
        time.sleep(delay)
        count = 0
        for i in range(repeat):
            existing = cluster_mgr.describe_cluster(module.params['name'])
            count = existing['registeredContainerInstancesCount']
            if count > 0:
                results['changed'] = True
                break
            time.sleep(delay)
        # BUG FIX: the original tested ``i is repeat-1`` -- an identity
        # comparison on a computed int that only works by accident of
        # CPython's small-int caching, and raised NameError when repeat <= 0.
        # The loop only breaks when count > 0, so count == 0 already means
        # the retries were exhausted.
        if count == 0:
            module.fail_json(msg="Cluster instance count still zero after "+str(repeat)+" tries of "+str(delay)+" seconds each.")
            return

    module.exit_json(**results)


if __name__ == '__main__':
    main()
| gpl-3.0 |
monash-merc/cvl-fabric-launcher | pyinstaller-2.1/PyInstaller/hooks/hook-zmq.py | 10 | 1382 | #-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
Hook for PyZMQ. Cython based Python bindings for messaging library ZeroMQ.
http://www.zeromq.org/
"""
import glob
import os
import sys
# Modules PyZMQ imports dynamically, which PyInstaller's static analysis
# cannot discover on its own.
hiddenimports = [
    'zmq.core.pysocket',
    'zmq.utils.jsonapi',
    'zmq.utils.strtypes',
]
def hook(mod):
    """If PyZMQ bundles its own copy of libzmq, register it in the module's
    binaries TOC so zmq/__init__.py can load it at runtime.

    For predictable behavior, this search must stay identical to the one
    performed in zmq/__init__.py.
    """
    zmq_directory = os.path.dirname(mod.__file__)
    for extension in ('pyd', 'so', 'dll', 'dylib'):
        candidates = glob.glob(os.path.join(zmq_directory, 'libzmq*.%s*' % extension))
        if not candidates:
            continue
        # zmq/__init__.py searches os.path.join(sys._MEIPASS, 'zmq'), so the
        # bundled library must land inside the 'zmq' directory.
        target_name = os.path.join('zmq', os.path.basename(candidates[0]))
        mod.binaries.append((target_name, candidates[0], 'BINARY'))
        break
    return mod
| gpl-3.0 |
Loller79/Solid_Kernel-GEEHRC | scripts/gcc-wrapper.py | 473 | 3422 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
# Warnings at these "file.ext:line" locations are accepted and will not
# fail the build.
allowed_warnings = set([
    "alignment.c:327",
    "mmu.c:602",
    "return_address.c:62",
])

# Capture the name of the object file, can find it.
ofile = None

# Matches gcc's "path/file.ext:line:(col:) warning:" output; group 2 is the
# "file.ext:line" key that is checked against allowed_warnings.
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
    """Decode the message from gcc.  The messages we care about have a filename, and a warning"""
    line = line.rstrip('\n')
    m = warning_re.match(line)
    if m and m.group(2) not in allowed_warnings:
        print "error, forbidden warning:", m.group(2)

        # If there is a warning, remove any object if it exists.
        if ofile:
            try:
                os.remove(ofile)
            except OSError:
                pass
        # Abort so the overall build fails loudly on the forbidden warning.
        sys.exit(1)
def run_gcc():
    # Run the wrapped compiler command (sys.argv[1:]), echoing its stderr
    # and checking each line for forbidden warnings.  Returns the
    # compiler's exit status, or the errno if the compiler failed to start.
    args = sys.argv[1:]
    # Look for -o
    try:
        i = args.index('-o')
        global ofile
        ofile = args[i+1]  # remembered so interpret_warning can delete it
    except (ValueError, IndexError):
        pass
    # NOTE(review): 'compiler' is assigned but never used; Popen executes
    # args[0] directly -- confirm whether this was intended.
    compiler = sys.argv[0]

    try:
        proc = subprocess.Popen(args, stderr=subprocess.PIPE)
        for line in proc.stderr:
            print line,
            interpret_warning(line)

        result = proc.wait()
    except OSError as e:
        result = e.errno
        if result == errno.ENOENT:
            print args[0] + ':',e.strerror
            print 'Is your PATH set correctly?'
        else:
            print ' '.join(args), str(e)

    return result
# Run the wrapper and propagate the compiler's exit status.
if __name__ == '__main__':
    status = run_gcc()
    sys.exit(status)
| gpl-2.0 |
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/site-packages/OpenGL/GL/SGIX/async.py | 4 | 2617 | '''OpenGL extension SGIX.async
This module customises the behaviour of the
OpenGL.raw.GL.SGIX.async to provide a more
Python-friendly API
Overview (from the spec)
This extension provides a framework for asynchronous OpenGL
commands. It also provides commands allowing a program to wait
for the completion of asynchronous commands.
Asynchronous commands have two properties:
1) Asynchronous commands are non-blocking. For example, an
asynchronous ReadPixels command returns control to the program
immediately rather than blocking until the command completes.
This property allows the program to issue other OpenGL commands in
parallel with the execution of commands that normally block.
2) Asynchronous commands may complete out-of-order with respect to
other OpenGL commands. For example, an asynchronous TexImage
command may complete after subsequent OpenGL commands issued by
the program rather than maintaining the normal serial order of the
OpenGL command stream. This property allows the graphics
accelerator to execute asynchronous commands in parallel with the
normal command stream, for instance using a secondary path to
transfer data from or to the host, without doing any dependency
checking.
Programs that issue asynchronous commands must also be able to
determine when the commands have completed. The completion status
may be needed so that results can be retrieved (e.g. the image
data from a ReadPixels command) or so that dependent commands can
be issued (e.g. drawing commands that use texture data downloaded
by an earlier asynchronous command). This extension provides
fine-grain control over asynchronous commands by introducing a
mechanism for determining the status of individual commands.
Each invocation of an asynchronous command is associated with an
integer called a "marker." A program specifies a marker before it
issues an asynchronous command. The program may later issue a
command to query if any asynchronous commands have completed. The
query commands return a marker to identify the command that
completed. This extension provides both blocking and non-blocking
query commands.
This extension does not define any asynchronous commands.
See SGIX_async_pixel for the asynchronous pixel commands.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/SGIX/async.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.SGIX.async import *
### END AUTOGENERATED SECTION | mit |
nvoron23/brython | site/tests/unittests/test/test_descr.py | 24 | 160529 | import builtins
import gc
import sys
import types
import math
import unittest
import weakref
from copy import deepcopy
from test import support
class OperatorsTest(unittest.TestCase):
    """Checks that operator expressions, the corresponding dunder methods
    looked up on the type, and the bound-method forms all agree, for
    built-in types and their subclasses."""

    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self, *args, **kwargs)
        # Map operator name -> expression template over operands 'a' and 'b'.
        self.binops = {
            'add': '+',
            'sub': '-',
            'mul': '*',
            'div': '/',
            'divmod': 'divmod',
            'pow': '**',
            'lshift': '<<',
            'rshift': '>>',
            'and': '&',
            'xor': '^',
            'or': '|',
            'cmp': 'cmp',
            'lt': '<',
            'le': '<=',
            'eq': '==',
            'ne': '!=',
            'gt': '>',
            'ge': '>=',
        }
        # Lowercase names are function-style ("divmod(a, b)"); symbols become
        # infix expressions ("a + b").
        for name, expr in list(self.binops.items()):
            if expr.islower():
                expr = expr + "(a, b)"
            else:
                expr = 'a %s b' % expr
            self.binops[name] = expr
        self.unops = {
            'pos': '+',
            'neg': '-',
            'abs': 'abs',
            'invert': '~',
            'int': 'int',
            'float': 'float',
            'oct': 'oct',
            'hex': 'hex',
        }
        for name, expr in list(self.unops.items()):
            if expr.islower():
                expr = expr + "(a)"
            else:
                expr = '%s a' % expr
            self.unops[name] = expr

    def unop_test(self, a, res, expr="len(a)", meth="__len__"):
        """Check a unary op three ways: eval'd expression, unbound dunder
        from the defining class, and the bound method on the instance."""
        d = {'a': a}
        self.assertEqual(eval(expr, d), res)
        t = type(a)
        m = getattr(t, meth)
        # Find method in parent class
        while meth not in t.__dict__:
            t = t.__bases__[0]
        # in some implementations (e.g. PyPy), 'm' can be a regular unbound
        # method object; the getattr() below obtains its underlying function.
        self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
        self.assertEqual(m(a), res)
        bm = getattr(a, meth)
        self.assertEqual(bm(), res)

    def binop_test(self, a, b, res, expr="a+b", meth="__add__"):
        """Same three-way check as unop_test, for binary operators."""
        d = {'a': a, 'b': b}

        # XXX Hack so this passes before 2.3 when -Qnew is specified.
        if meth == "__div__" and 1/2 == 0.5:
            meth = "__truediv__"

        if meth == '__divmod__': pass
        self.assertEqual(eval(expr, d), res)
        t = type(a)
        m = getattr(t, meth)
        while meth not in t.__dict__:
            t = t.__bases__[0]
        # in some implementations (e.g. PyPy), 'm' can be a regular unbound
        # method object; the getattr() below obtains its underlying function.
        self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
        self.assertEqual(m(a, b), res)
        bm = getattr(a, meth)
        self.assertEqual(bm(b), res)

    def sliceop_test(self, a, b, c, res, expr="a[b:c]", meth="__getitem__"):
        """Three-way check for slicing, passing an explicit slice(b, c) to
        the dunder forms."""
        d = {'a': a, 'b': b, 'c': c}
        self.assertEqual(eval(expr, d), res)
        t = type(a)
        m = getattr(t, meth)
        while meth not in t.__dict__:
            t = t.__bases__[0]
        # in some implementations (e.g. PyPy), 'm' can be a regular unbound
        # method object; the getattr() below obtains its underlying function.
        self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
        self.assertEqual(m(a, slice(b, c)), res)
        bm = getattr(a, meth)
        self.assertEqual(bm(slice(b, c)), res)

    def setop_test(self, a, b, res, stmt="a+=b", meth="__iadd__"):
        """Three-way check for in-place binary ops; 'a' is deep-copied so
        each form starts from the same value."""
        d = {'a': deepcopy(a), 'b': b}
        exec(stmt, d)
        self.assertEqual(d['a'], res)
        t = type(a)
        m = getattr(t, meth)
        while meth not in t.__dict__:
            t = t.__bases__[0]
        # in some implementations (e.g. PyPy), 'm' can be a regular unbound
        # method object; the getattr() below obtains its underlying function.
        self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
        d['a'] = deepcopy(a)
        m(d['a'], b)
        self.assertEqual(d['a'], res)
        d['a'] = deepcopy(a)
        bm = getattr(d['a'], meth)
        bm(b)
        self.assertEqual(d['a'], res)

    def set2op_test(self, a, b, c, res, stmt="a[b]=c", meth="__setitem__"):
        """Three-way check for item assignment (two operands plus target)."""
        d = {'a': deepcopy(a), 'b': b, 'c': c}
        exec(stmt, d)
        self.assertEqual(d['a'], res)
        t = type(a)
        m = getattr(t, meth)
        while meth not in t.__dict__:
            t = t.__bases__[0]
        # in some implementations (e.g. PyPy), 'm' can be a regular unbound
        # method object; the getattr() below obtains its underlying function.
        self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
        d['a'] = deepcopy(a)
        m(d['a'], b, c)
        self.assertEqual(d['a'], res)
        d['a'] = deepcopy(a)
        bm = getattr(d['a'], meth)
        bm(b, c)
        self.assertEqual(d['a'], res)

    def setsliceop_test(self, a, b, c, d, res, stmt="a[b:c]=d", meth="__setitem__"):
        """Three-way check for slice assignment, using slice(b, c) for the
        dunder forms."""
        dictionary = {'a': deepcopy(a), 'b': b, 'c': c, 'd': d}
        exec(stmt, dictionary)
        self.assertEqual(dictionary['a'], res)
        t = type(a)
        while meth not in t.__dict__:
            t = t.__bases__[0]
        m = getattr(t, meth)
        # in some implementations (e.g. PyPy), 'm' can be a regular unbound
        # method object; the getattr() below obtains its underlying function.
        self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
        dictionary['a'] = deepcopy(a)
        m(dictionary['a'], slice(b, c), d)
        self.assertEqual(dictionary['a'], res)
        dictionary['a'] = deepcopy(a)
        bm = getattr(dictionary['a'], meth)
        bm(slice(b, c), d)
        self.assertEqual(dictionary['a'], res)

    def test_lists(self):
        # Testing list operations...
        # Asserts are within individual test methods
        self.binop_test([1], [2], [1,2], "a+b", "__add__")
        self.binop_test([1,2,3], 2, 1, "b in a", "__contains__")
        self.binop_test([1,2,3], 4, 0, "b in a", "__contains__")
        self.binop_test([1,2,3], 1, 2, "a[b]", "__getitem__")
        self.sliceop_test([1,2,3], 0, 2, [1,2], "a[b:c]", "__getitem__")
        self.setop_test([1], [2], [1,2], "a+=b", "__iadd__")
        self.setop_test([1,2], 3, [1,2,1,2,1,2], "a*=b", "__imul__")
        self.unop_test([1,2,3], 3, "len(a)", "__len__")
        self.binop_test([1,2], 3, [1,2,1,2,1,2], "a*b", "__mul__")
        self.binop_test([1,2], 3, [1,2,1,2,1,2], "b*a", "__rmul__")
        self.set2op_test([1,2], 1, 3, [1,3], "a[b]=c", "__setitem__")
        self.setsliceop_test([1,2,3,4], 1, 3, [5,6], [1,5,6,4], "a[b:c]=d",
                             "__setitem__")

    def test_dicts(self):
        # Testing dict operations...
        self.binop_test({1:2,3:4}, 1, 1, "b in a", "__contains__")
        self.binop_test({1:2,3:4}, 2, 0, "b in a", "__contains__")
        self.binop_test({1:2,3:4}, 1, 2, "a[b]", "__getitem__")
        d = {1:2, 3:4}
        l1 = []
        for i in list(d.keys()):
            l1.append(i)
        l = []
        for i in iter(d):
            l.append(i)
        self.assertEqual(l, l1)
        l = []
        for i in d.__iter__():
            l.append(i)
        self.assertEqual(l, l1)
        l = []
        for i in dict.__iter__(d):
            l.append(i)
        self.assertEqual(l, l1)
        d = {1:2, 3:4}
        self.unop_test(d, 2, "len(a)", "__len__")
        self.assertEqual(eval(repr(d), {}), d)
        self.assertEqual(eval(d.__repr__(), {}), d)
        self.set2op_test({1:2,3:4}, 2, 3, {1:2,2:3,3:4}, "a[b]=c",
                         "__setitem__")

    # Tests for unary and binary operators
    def number_operators(self, a, b, skip=[]):
        """Run every binop/unop template (except those in ``skip``) that the
        type of ``a`` actually implements, comparing against direct eval."""
        dict = {'a': a, 'b': b}
        for name, expr in list(self.binops.items()):
            if name not in skip:
                name = "__%s__" % name
                if hasattr(a, name):
                    res = eval(expr, dict)
                    self.binop_test(a, b, res, expr, name)
        for name, expr in list(self.unops.items()):
            if name not in skip:
                name = "__%s__" % name
                if hasattr(a, name):
                    res = eval(expr, dict)
                    self.unop_test(a, res, expr, name)

    def test_ints(self):
        # Testing int operations...
        self.number_operators(100, 3)
        # The following crashes in Python 2.2
        self.assertEqual((1).__bool__(), 1)
        self.assertEqual((0).__bool__(), 0)
        # This returns 'NotImplemented' in Python 2.2
        class C(int):
            def __add__(self, other):
                return NotImplemented
        self.assertEqual(C(5), 5)
        try:
            C() + ""
        except TypeError:
            pass
        else:
            self.fail("NotImplemented should have caused TypeError")

    def test_floats(self):
        # Testing float operations...
        self.number_operators(100.0, 3.0)

    def test_complexes(self):
        # Testing complex operations...
        # Complex numbers are unordered and have no int/float conversion,
        # so those operator names are skipped.
        self.number_operators(100.0j, 3.0j, skip=['lt', 'le', 'gt', 'ge',
                                                  'int', 'float',
                                                  'divmod', 'mod'])
        class Number(complex):
            __slots__ = ['prec']
            def __new__(cls, *args, **kwds):
                result = complex.__new__(cls, *args)
                result.prec = kwds.get('prec', 12)
                return result
            def __repr__(self):
                prec = self.prec
                if self.imag == 0.0:
                    return "%.*g" % (prec, self.real)
                if self.real == 0.0:
                    return "%.*gj" % (prec, self.imag)
                return "(%.*g+%.*gj)" % (prec, self.real, prec, self.imag)
            __str__ = __repr__
        a = Number(3.14, prec=6)
        self.assertEqual(repr(a), "3.14")
        self.assertEqual(a.prec, 6)
        a = Number(a, prec=2)
        self.assertEqual(repr(a), "3.1")
        self.assertEqual(a.prec, 2)
        a = Number(234.5)
        self.assertEqual(repr(a), "234.5")
        self.assertEqual(a.prec, 12)

    def test_explicit_reverse_methods(self):
        # see issue 9930
        self.assertEqual(complex.__radd__(3j, 4.0), complex(4.0, 3.0))
        self.assertEqual(float.__rsub__(3.0, 1), -2.0)

    @support.impl_detail("the module 'xxsubtype' is internal")
    def test_spam_lists(self):
        # Testing spamlist operations...
        import copy, xxsubtype as spam
        def spamlist(l, memo=None):
            import xxsubtype as spam
            return spam.spamlist(l)
        # This is an ugly hack:
        copy._deepcopy_dispatch[spam.spamlist] = spamlist
        self.binop_test(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+b",
                        "__add__")
        self.binop_test(spamlist([1,2,3]), 2, 1, "b in a", "__contains__")
        self.binop_test(spamlist([1,2,3]), 4, 0, "b in a", "__contains__")
        self.binop_test(spamlist([1,2,3]), 1, 2, "a[b]", "__getitem__")
        self.sliceop_test(spamlist([1,2,3]), 0, 2, spamlist([1,2]), "a[b:c]",
                          "__getitem__")
        self.setop_test(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+=b",
                        "__iadd__")
        self.setop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*=b",
                        "__imul__")
        self.unop_test(spamlist([1,2,3]), 3, "len(a)", "__len__")
        self.binop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*b",
                        "__mul__")
        self.binop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "b*a",
                        "__rmul__")
        self.set2op_test(spamlist([1,2]), 1, 3, spamlist([1,3]), "a[b]=c",
                         "__setitem__")
        self.setsliceop_test(spamlist([1,2,3,4]), 1, 3, spamlist([5,6]),
                             spamlist([1,5,6,4]), "a[b:c]=d", "__setitem__")
        # Test subclassing
        class C(spam.spamlist):
            def foo(self): return 1
        a = C()
        self.assertEqual(a, [])
        self.assertEqual(a.foo(), 1)
        a.append(100)
        self.assertEqual(a, [100])
        self.assertEqual(a.getstate(), 0)
        a.setstate(42)
        self.assertEqual(a.getstate(), 42)

    @support.impl_detail("the module 'xxsubtype' is internal")
    def test_spam_dicts(self):
        # Testing spamdict operations...
        import copy, xxsubtype as spam
        def spamdict(d, memo=None):
            import xxsubtype as spam
            sd = spam.spamdict()
            for k, v in list(d.items()):
                sd[k] = v
            return sd
        # This is an ugly hack:
        copy._deepcopy_dispatch[spam.spamdict] = spamdict
        self.binop_test(spamdict({1:2,3:4}), 1, 1, "b in a", "__contains__")
        self.binop_test(spamdict({1:2,3:4}), 2, 0, "b in a", "__contains__")
        self.binop_test(spamdict({1:2,3:4}), 1, 2, "a[b]", "__getitem__")
        d = spamdict({1:2,3:4})
        l1 = []
        for i in list(d.keys()):
            l1.append(i)
        l = []
        for i in iter(d):
            l.append(i)
        self.assertEqual(l, l1)
        l = []
        for i in d.__iter__():
            l.append(i)
        self.assertEqual(l, l1)
        l = []
        for i in type(spamdict({})).__iter__(d):
            l.append(i)
        self.assertEqual(l, l1)
        straightd = {1:2, 3:4}
        spamd = spamdict(straightd)
        self.unop_test(spamd, 2, "len(a)", "__len__")
        self.unop_test(spamd, repr(straightd), "repr(a)", "__repr__")
        self.set2op_test(spamdict({1:2,3:4}), 2, 3, spamdict({1:2,2:3,3:4}),
                         "a[b]=c", "__setitem__")
        # Test subclassing
        class C(spam.spamdict):
            def foo(self): return 1
        a = C()
        self.assertEqual(list(a.items()), [])
        self.assertEqual(a.foo(), 1)
        a['foo'] = 'bar'
        self.assertEqual(list(a.items()), [('foo', 'bar')])
        self.assertEqual(a.getstate(), 0)
        a.setstate(100)
        self.assertEqual(a.getstate(), 100)
class ClassPropertiesAndMethods(unittest.TestCase):
def test_python_dicts(self):
    # Testing Python subclass of dict...
    # Verifies that a Python-level dict subclass can override __init__,
    # __getitem__ and __setitem__, carry extra state, and still nest.
    self.assertTrue(issubclass(dict, dict))
    self.assertIsInstance({}, dict)
    d = dict()
    self.assertEqual(d, {})
    self.assertTrue(d.__class__ is dict)
    self.assertIsInstance(d, dict)
    class C(dict):
        state = -1
        # NOTE: the methods use 'self_local' for the instance so that the
        # closed-over outer 'self' (the TestCase) stays available for asserts.
        def __init__(self_local, *a, **kw):
            if a:
                self.assertEqual(len(a), 1)
                self_local.state = a[0]
            if kw:
                for k, v in list(kw.items()):
                    self_local[v] = k
        def __getitem__(self, key):
            # Missing keys default to 0 instead of raising KeyError.
            return self.get(key, 0)
        def __setitem__(self_local, key, value):
            self.assertIsInstance(key, type(0))
            dict.__setitem__(self_local, key, value)
        def setstate(self, state):
            self.state = state
        def getstate(self):
            return self.state
    self.assertTrue(issubclass(C, dict))
    a1 = C(12)
    self.assertEqual(a1.state, 12)
    a2 = C(foo=1, bar=2)
    # BUG FIX: this was previously the single expression
    # assertEqual(a2[1] == 'foo' and a2[2], 'bar'), which short-circuits
    # two distinct checks through one boolean; assert each mapping
    # separately for the same semantics with clear failure messages.
    self.assertEqual(a2[1], 'foo')
    self.assertEqual(a2[2], 'bar')
    a = C()
    self.assertEqual(a.state, -1)
    self.assertEqual(a.getstate(), -1)
    a.setstate(0)
    self.assertEqual(a.state, 0)
    self.assertEqual(a.getstate(), 0)
    a.setstate(10)
    self.assertEqual(a.state, 10)
    self.assertEqual(a.getstate(), 10)
    self.assertEqual(a[42], 0)
    a[42] = 24
    self.assertEqual(a[42], 24)
    # Nested dict-of-dicts stress: 50x50 grid of subclass instances.
    N = 50
    for i in range(N):
        a[i] = C()
        for j in range(N):
            a[i][j] = i*j
    for i in range(N):
        for j in range(N):
            self.assertEqual(a[i][j], i*j)
def test_python_lists(self):
    # Testing Python subclass of list...
    # __getitem__ must see a single slice object for a[start:stop]
    # (there is no separate __getslice__ in Python 3).
    class C(list):
        def __getitem__(self, i):
            if isinstance(i, slice):
                return i.start, i.stop
            return list.__getitem__(self, i) + 100
    a = C()
    a.extend([0,1,2])
    self.assertEqual(a[0], 100)
    self.assertEqual(a[1], 101)
    self.assertEqual(a[2], 102)
    self.assertEqual(a[100:200], (100,200))
def test_metaclass(self):
    # Testing metaclasses...
    # Covers, in order: explicit metaclass=type; a trivial type subclass;
    # __new__-injecting metaclasses; a non-type callable metaclass (M2);
    # cooperative metaclasses (autosuper/autoproperty and their merge);
    # type(x) not re-calling __init__; most-derived-metaclass calculation
    # (including issue1294232 __new__/__prepare__ ordering); and
    # non-class / non-type "metaclasses".
    class C(metaclass=type):
        def __init__(self):
            self.__state = 0
        def getstate(self):
            return self.__state
        def setstate(self, state):
            self.__state = state
    a = C()
    self.assertEqual(a.getstate(), 0)
    a.setstate(10)
    self.assertEqual(a.getstate(), 10)
    class _metaclass(type):
        def myself(cls): return cls
    class D(metaclass=_metaclass):
        pass
    self.assertEqual(D.myself(), D)
    d = D()
    self.assertEqual(d.__class__, D)
    class M1(type):
        def __new__(cls, name, bases, dict):
            # Inject an attribute into the namespace before type creation.
            dict['__spam__'] = 1
            return type.__new__(cls, name, bases, dict)
    class C(metaclass=M1):
        pass
    self.assertEqual(C.__spam__, 1)
    c = C()
    self.assertEqual(c.__spam__, 1)
    class _instance(object):
        pass
    class M2(object):
        # A "metaclass" that is not a type at all: calling it like a class
        # just records name/bases/dict, and instances are built by hand.
        @staticmethod
        def __new__(cls, name, bases, dict):
            self = object.__new__(cls)
            self.name = name
            self.bases = bases
            self.dict = dict
            return self
        def __call__(self):
            it = _instance()
            # Early binding of methods
            for key in self.dict:
                if key.startswith("__"):
                    continue
                setattr(it, key, self.dict[key].__get__(it, self))
            return it
    class C(metaclass=M2):
        def spam(self):
            return 42
    self.assertEqual(C.name, 'C')
    self.assertEqual(C.bases, ())
    self.assertIn('spam', C.dict)
    c = C()
    self.assertEqual(c.spam(), 42)
    # More metaclass examples
    class autosuper(type):
        # Automatically add __super to the class
        # This trick only works for dynamic classes
        def __new__(metaclass, name, bases, dict):
            cls = super(autosuper, metaclass).__new__(metaclass,
                                                      name, bases, dict)
            # Name mangling for __super removes leading underscores
            while name[:1] == "_":
                name = name[1:]
            if name:
                name = "_%s__super" % name
            else:
                name = "__super"
            setattr(cls, name, super(cls))
            return cls
    class A(metaclass=autosuper):
        def meth(self):
            return "A"
    class B(A):
        def meth(self):
            return "B" + self.__super.meth()
    class C(A):
        def meth(self):
            return "C" + self.__super.meth()
    class D(C, B):
        def meth(self):
            return "D" + self.__super.meth()
    self.assertEqual(D().meth(), "DCBA")
    class E(B, C):
        def meth(self):
            return "E" + self.__super.meth()
    self.assertEqual(E().meth(), "EBCA")
    class autoproperty(type):
        # Automatically create property attributes when methods
        # named _get_x and/or _set_x are found
        def __new__(metaclass, name, bases, dict):
            hits = {}
            for key, val in dict.items():
                if key.startswith("_get_"):
                    key = key[5:]
                    get, set = hits.get(key, (None, None))
                    get = val
                    hits[key] = get, set
                elif key.startswith("_set_"):
                    key = key[5:]
                    get, set = hits.get(key, (None, None))
                    set = val
                    hits[key] = get, set
            for key, (get, set) in hits.items():
                dict[key] = property(get, set)
            return super(autoproperty, metaclass).__new__(metaclass,
                                                          name, bases, dict)
    class A(metaclass=autoproperty):
        def _get_x(self):
            return -self.__x
        def _set_x(self, x):
            self.__x = -x
    a = A()
    self.assertTrue(not hasattr(a, "x"))
    a.x = 12
    self.assertEqual(a.x, 12)
    self.assertEqual(a._A__x, -12)
    class multimetaclass(autoproperty, autosuper):
        # Merge of multiple cooperating metaclasses
        pass
    class A(metaclass=multimetaclass):
        def _get_x(self):
            return "A"
    class B(A):
        def _get_x(self):
            return "B" + self.__super._get_x()
    class C(A):
        def _get_x(self):
            return "C" + self.__super._get_x()
    class D(C, B):
        def _get_x(self):
            return "D" + self.__super._get_x()
    self.assertEqual(D().x, "DCBA")
    # Make sure type(x) doesn't call x.__class__.__init__
    class T(type):
        counter = 0
        def __init__(self, *args):
            T.counter += 1
    class C(metaclass=T):
        pass
    self.assertEqual(T.counter, 1)
    a = C()
    self.assertEqual(type(a), C)
    self.assertEqual(T.counter, 1)
    class C(object): pass
    c = C()
    try: c()
    except TypeError: pass
    else: self.fail("calling object w/o call method should raise "
                    "TypeError")
    # Testing code to find most derived baseclass
    class A(type):
        def __new__(*args, **kwargs):
            return type.__new__(*args, **kwargs)
    class B(object):
        pass
    class C(object, metaclass=A):
        pass
    # The most derived metaclass of D is A rather than type.
    class D(B, C):
        pass
    self.assertIs(A, type(D))
    # issue1294232: correct metaclass calculation
    new_calls = []  # to check the order of __new__ calls
    class AMeta(type):
        @staticmethod
        def __new__(mcls, name, bases, ns):
            new_calls.append('AMeta')
            return super().__new__(mcls, name, bases, ns)
        @classmethod
        def __prepare__(mcls, name, bases):
            return {}
    class BMeta(AMeta):
        @staticmethod
        def __new__(mcls, name, bases, ns):
            new_calls.append('BMeta')
            return super().__new__(mcls, name, bases, ns)
        @classmethod
        def __prepare__(mcls, name, bases):
            ns = super().__prepare__(name, bases)
            ns['BMeta_was_here'] = True
            return ns
    class A(metaclass=AMeta):
        pass
    self.assertEqual(['AMeta'], new_calls)
    new_calls.clear()
    class B(metaclass=BMeta):
        pass
    # BMeta.__new__ calls AMeta.__new__ with super:
    self.assertEqual(['BMeta', 'AMeta'], new_calls)
    new_calls.clear()
    class C(A, B):
        pass
    # The most derived metaclass is BMeta:
    self.assertEqual(['BMeta', 'AMeta'], new_calls)
    new_calls.clear()
    # BMeta.__prepare__ should've been called:
    self.assertIn('BMeta_was_here', C.__dict__)
    # The order of the bases shouldn't matter:
    class C2(B, A):
        pass
    self.assertEqual(['BMeta', 'AMeta'], new_calls)
    new_calls.clear()
    self.assertIn('BMeta_was_here', C2.__dict__)
    # Check correct metaclass calculation when a metaclass is declared:
    class D(C, metaclass=type):
        pass
    self.assertEqual(['BMeta', 'AMeta'], new_calls)
    new_calls.clear()
    self.assertIn('BMeta_was_here', D.__dict__)
    class E(C, metaclass=AMeta):
        pass
    self.assertEqual(['BMeta', 'AMeta'], new_calls)
    new_calls.clear()
    self.assertIn('BMeta_was_here', E.__dict__)
    # Special case: the given metaclass isn't a class,
    # so there is no metaclass calculation.
    marker = object()
    def func(*args, **kwargs):
        return marker
    class X(metaclass=func):
        pass
    class Y(object, metaclass=func):
        pass
    class Z(D, metaclass=func):
        pass
    self.assertIs(marker, X)
    self.assertIs(marker, Y)
    self.assertIs(marker, Z)
    # The given metaclass is a class,
    # but not a descendant of type.
    prepare_calls = []  # to track __prepare__ calls
    class ANotMeta:
        def __new__(mcls, *args, **kwargs):
            new_calls.append('ANotMeta')
            return super().__new__(mcls)
        @classmethod
        def __prepare__(mcls, name, bases):
            prepare_calls.append('ANotMeta')
            return {}
    class BNotMeta(ANotMeta):
        def __new__(mcls, *args, **kwargs):
            new_calls.append('BNotMeta')
            return super().__new__(mcls)
        @classmethod
        def __prepare__(mcls, name, bases):
            prepare_calls.append('BNotMeta')
            return super().__prepare__(name, bases)
    class A(metaclass=ANotMeta):
        pass
    self.assertIs(ANotMeta, type(A))
    self.assertEqual(['ANotMeta'], prepare_calls)
    prepare_calls.clear()
    self.assertEqual(['ANotMeta'], new_calls)
    new_calls.clear()
    class B(metaclass=BNotMeta):
        pass
    self.assertIs(BNotMeta, type(B))
    self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
    prepare_calls.clear()
    self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
    new_calls.clear()
    class C(A, B):
        pass
    self.assertIs(BNotMeta, type(C))
    self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
    new_calls.clear()
    self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
    prepare_calls.clear()
    class C2(B, A):
        pass
    self.assertIs(BNotMeta, type(C2))
    self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
    new_calls.clear()
    self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
    prepare_calls.clear()
    # This is a TypeError, because of a metaclass conflict:
    # BNotMeta is neither a subclass, nor a superclass of type
    with self.assertRaises(TypeError):
        class D(C, metaclass=type):
            pass
    class E(C, metaclass=ANotMeta):
        pass
    self.assertIs(BNotMeta, type(E))
    self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
    new_calls.clear()
    self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
    prepare_calls.clear()
    # Instances as bases: the metaclass calculation still picks BNotMeta.
    class F(object(), C):
        pass
    self.assertIs(BNotMeta, type(F))
    self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
    new_calls.clear()
    self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
    prepare_calls.clear()
    class F2(C, object()):
        pass
    self.assertIs(BNotMeta, type(F2))
    self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
    new_calls.clear()
    self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
    prepare_calls.clear()
    # TypeError: BNotMeta is neither a
    # subclass, nor a superclass of int
    with self.assertRaises(TypeError):
        class X(C, int()):
            pass
    with self.assertRaises(TypeError):
        class X(int(), C):
            pass
def test_module_subclasses(self):
    # Testing Python subclass of module...
    # A module subclass can intercept attribute get/set/delete; and
    # ModuleType cannot be combined with str (incompatible layouts).
    log = []
    MT = type(sys)
    class MM(MT):
        def __init__(self, name):
            MT.__init__(self, name)
        def __getattribute__(self, name):
            log.append(("getattr", name))
            return MT.__getattribute__(self, name)
        def __setattr__(self, name, value):
            log.append(("setattr", name, value))
            MT.__setattr__(self, name, value)
        def __delattr__(self, name):
            log.append(("delattr", name))
            MT.__delattr__(self, name)
    a = MM("a")
    a.foo = 12
    x = a.foo
    del a.foo
    self.assertEqual(log, [("setattr", "foo", 12),
                           ("getattr", "foo"),
                           ("delattr", "foo")])
    # http://python.org/sf/1174712
    try:
        class Module(types.ModuleType, str):
            pass
    except TypeError:
        pass
    else:
        self.fail("inheriting from ModuleType and str at the same time "
                  "should fail")
def test_multiple_inheritance(self):
    # Testing multiple inheritance...
    # dict + plain class mix: MRO, state, and SF bug #442833
    # (__int__ lookup must use the instance's most-derived class).
    class C(object):
        def __init__(self):
            self.__state = 0
        def getstate(self):
            return self.__state
        def setstate(self, state):
            self.__state = state
    a = C()
    self.assertEqual(a.getstate(), 0)
    a.setstate(10)
    self.assertEqual(a.getstate(), 10)
    class D(dict, C):
        def __init__(self):
            # type({}) is dict; both base __init__s must run.
            type({}).__init__(self)
            C.__init__(self)
    d = D()
    self.assertEqual(list(d.keys()), [])
    d["hello"] = "world"
    self.assertEqual(list(d.items()), [("hello", "world")])
    self.assertEqual(d["hello"], "world")
    self.assertEqual(d.getstate(), 0)
    d.setstate(10)
    self.assertEqual(d.getstate(), 10)
    self.assertEqual(D.__mro__, (D, dict, C, object))
    # SF bug #442833
    class Node(object):
        def __int__(self):
            return int(self.foo())
        def foo(self):
            return "23"
    class Frag(Node, list):
        def foo(self):
            return "42"
    self.assertEqual(Node().__int__(), 23)
    self.assertEqual(int(Node()), 23)
    self.assertEqual(Frag().__int__(), 42)
    self.assertEqual(int(Frag()), 42)
def test_diamond_inheritence(self):
    # Testing multiple inheritance special cases...
    # NOTE(review): the method name keeps its historical misspelling
    # ("inheritence"); renaming would change the public test id.
    class A(object):
        def spam(self): return "A"
    self.assertEqual(A().spam(), "A")
    class B(A):
        def boo(self): return "B"
        def spam(self): return "B"
    self.assertEqual(B().spam(), "B")
    self.assertEqual(B().boo(), "B")
    class C(A):
        def boo(self): return "C"
    self.assertEqual(C().spam(), "A")
    self.assertEqual(C().boo(), "C")
    class D(B, C): pass
    self.assertEqual(D().spam(), "B")
    self.assertEqual(D().boo(), "B")
    self.assertEqual(D.__mro__, (D, B, C, A, object))
    class E(C, B): pass
    self.assertEqual(E().spam(), "B")
    self.assertEqual(E().boo(), "C")
    self.assertEqual(E.__mro__, (E, C, B, A, object))
    # MRO order disagreement: D wants B before C, E wants C before B,
    # so no consistent C3 linearization exists for their combination.
    try:
        class F(D, E): pass
    except TypeError:
        pass
    else:
        self.fail("expected MRO order disagreement (F)")
    try:
        class G(E, D): pass
    except TypeError:
        pass
    else:
        self.fail("expected MRO order disagreement (G)")
# see thread python-dev/2002-October/029035.html
def test_ex5_from_c3_switch(self):
    # Testing ex5 from C3 switch discussion...
    # The C3 linearization must interleave unrelated bases B and C
    # between the X/Y diamond over A.
    class A(object): pass
    class B(object): pass
    class C(object): pass
    class X(A): pass
    class Y(A): pass
    class Z(X,B,Y,C): pass
    self.assertEqual(Z.__mro__, (Z, X, B, Y, A, C, object))
# see "A Monotonic Superclass Linearization for Dylan",
# by Kim Barrett et al. (OOPSLA 1996)
def test_monotonicity(self):
    # Testing MRO monotonicity...
    # Each class's MRO must preserve the relative order of every
    # ancestor's MRO (the boat hierarchy from the Dylan paper).
    class Boat(object): pass
    class DayBoat(Boat): pass
    class WheelBoat(Boat): pass
    class EngineLess(DayBoat): pass
    class SmallMultihull(DayBoat): pass
    class PedalWheelBoat(EngineLess,WheelBoat): pass
    class SmallCatamaran(SmallMultihull): pass
    class Pedalo(PedalWheelBoat,SmallCatamaran): pass
    self.assertEqual(PedalWheelBoat.__mro__,
          (PedalWheelBoat, EngineLess, DayBoat, WheelBoat, Boat, object))
    self.assertEqual(SmallCatamaran.__mro__,
          (SmallCatamaran, SmallMultihull, DayBoat, Boat, object))
    self.assertEqual(Pedalo.__mro__,
          (Pedalo, PedalWheelBoat, EngineLess, SmallCatamaran,
           SmallMultihull, DayBoat, WheelBoat, Boat, object))
# see "A Monotonic Superclass Linearization for Dylan",
# by Kim Barrett et al. (OOPSLA 1996)
def test_consistency_with_epg(self):
    # Testing consistency with EPG (the "extended precedence graph")...
    class Pane(object): pass
    class ScrollingMixin(object): pass
    class EditingMixin(object): pass
    class ScrollablePane(Pane,ScrollingMixin): pass
    class EditablePane(Pane,EditingMixin): pass
    class EditableScrollablePane(ScrollablePane,EditablePane): pass
    self.assertEqual(EditableScrollablePane.__mro__,
          (EditableScrollablePane, ScrollablePane, EditablePane, Pane,
           ScrollingMixin, EditingMixin, object))
def test_mro_disagreement(self):
    # Testing error messages for MRO disagreement...
    # The second line of this message deliberately starts at column 0:
    # it is part of the string literal, not indentation.
    mro_err_msg = """Cannot create a consistent method resolution
order (MRO) for bases """
    def raises(exc, expected, callable, *args):
        # Assert that callable(*args) raises exc; on CPython also check
        # the message prefix (exact text is an implementation detail).
        try:
            callable(*args)
        except exc as msg:
            # the exact msg is generally considered an impl detail
            if support.check_impl_detail():
                if not str(msg).startswith(expected):
                    self.fail("Message %r, expected %r" %
                              (str(msg), expected))
        else:
            self.fail("Expected %s" % exc)
    class A(object): pass
    class B(A): pass
    class C(object): pass
    # Test some very simple errors
    raises(TypeError, "duplicate base class A",
           type, "X", (A, A), {})
    raises(TypeError, mro_err_msg,
           type, "X", (A, B), {})
    raises(TypeError, mro_err_msg,
           type, "X", (A, C, B), {})
    # Test a slightly more complex error
    class GridLayout(object): pass
    class HorizontalGrid(GridLayout): pass
    class VerticalGrid(GridLayout): pass
    class HVGrid(HorizontalGrid, VerticalGrid): pass
    class VHGrid(VerticalGrid, HorizontalGrid): pass
    raises(TypeError, mro_err_msg,
           type, "ConfusedGrid", (HVGrid, VHGrid), {})
def test_object_class(self):
    # Testing object class...
    # Plain object() instances have no __dict__ and reject new attributes;
    # a trivial subclass gains a writable __dict__.
    a = object()
    self.assertEqual(a.__class__, object)
    self.assertEqual(type(a), object)
    b = object()
    self.assertNotEqual(a, b)
    self.assertFalse(hasattr(a, "foo"))
    try:
        a.foo = 12
    except (AttributeError, TypeError):
        pass
    else:
        self.fail("object() should not allow setting a foo attribute")
    self.assertFalse(hasattr(object(), "__dict__"))
    class Cdict(object):
        pass
    x = Cdict()
    self.assertEqual(x.__dict__, {})
    x.foo = 1
    self.assertEqual(x.foo, 1)
    self.assertEqual(x.__dict__, {'foo': 1})
def test_slots(self):
    # Testing __slots__...
    # Covers: instances without __dict__; slot name validation; a plain
    # string not being expanded as a sequence; reference-leak checks via
    # a Counted class; cyclic-GC interaction; and __del__ on slotted
    # instances.
    class C0(object):
        __slots__ = []
    x = C0()
    self.assertFalse(hasattr(x, "__dict__"))
    self.assertFalse(hasattr(x, "foo"))
    class C1(object):
        __slots__ = ['a']
    x = C1()
    self.assertFalse(hasattr(x, "__dict__"))
    self.assertFalse(hasattr(x, "a"))
    x.a = 1
    self.assertEqual(x.a, 1)
    x.a = None
    self.assertEqual(x.a, None)
    del x.a
    self.assertFalse(hasattr(x, "a"))
    class C3(object):
        __slots__ = ['a', 'b', 'c']
    x = C3()
    self.assertFalse(hasattr(x, "__dict__"))
    self.assertFalse(hasattr(x, 'a'))
    self.assertFalse(hasattr(x, 'b'))
    self.assertFalse(hasattr(x, 'c'))
    x.a = 1
    x.b = 2
    x.c = 3
    self.assertEqual(x.a, 1)
    self.assertEqual(x.b, 2)
    self.assertEqual(x.c, 3)
    class C4(object):
        """Validate name mangling"""
        __slots__ = ['__a']
        def __init__(self, value):
            self.__a = value
        def get(self):
            return self.__a
    x = C4(5)
    self.assertFalse(hasattr(x, '__dict__'))
    self.assertFalse(hasattr(x, '__a'))
    self.assertEqual(x.get(), 5)
    try:
        x.__a = 6
    except AttributeError:
        pass
    else:
        self.fail("Double underscored names not mangled")
    # Make sure slot names are proper identifiers
    try:
        class C(object):
            __slots__ = [None]
    except TypeError:
        pass
    else:
        self.fail("[None] slots not caught")
    try:
        class C(object):
            __slots__ = ["foo bar"]
    except TypeError:
        pass
    else:
        self.fail("['foo bar'] slots not caught")
    try:
        class C(object):
            __slots__ = ["foo\0bar"]
    except TypeError:
        pass
    else:
        self.fail("['foo\\0bar'] slots not caught")
    try:
        class C(object):
            __slots__ = ["1"]
    except TypeError:
        pass
    else:
        self.fail("['1'] slots not caught")
    try:
        class C(object):
            __slots__ = [""]
    except TypeError:
        pass
    else:
        self.fail("[''] slots not caught")
    class C(object):
        __slots__ = ["a", "a_b", "_a", "A0123456789Z"]
    # XXX(nnorwitz): was there supposed to be something tested
    # from the class above?
    # Test a single string is not expanded as a sequence.
    class C(object):
        __slots__ = "abc"
    c = C()
    c.abc = 5
    self.assertEqual(c.abc, 5)
    # Test unicode slot names
    # Test a single unicode string is not expanded as a sequence.
    class C(object):
        __slots__ = "abc"
    c = C()
    c.abc = 5
    self.assertEqual(c.abc, 5)
    # _unicode_to_string used to modify slots in certain circumstances
    slots = ("foo", "bar")
    class C(object):
        __slots__ = slots
    x = C()
    x.foo = 5
    self.assertEqual(x.foo, 5)
    self.assertTrue(type(slots[0]) is str)
    # this used to leak references
    try:
        class C(object):
            __slots__ = [chr(128)]
    except (TypeError, UnicodeEncodeError):
        pass
    else:
        # BUG FIX: this branch previously did `raise TestFailed(...)`,
        # a name left over from the Python-2 test harness that is
        # undefined here -- had the branch ever fired it would have
        # raised NameError instead of reporting the real failure.
        self.fail("[chr(128)] slots not caught")
    # Test leaks
    class Counted(object):
        counter = 0    # counts the number of instances alive
        def __init__(self):
            Counted.counter += 1
        def __del__(self):
            Counted.counter -= 1
    class C(object):
        __slots__ = ['a', 'b', 'c']
    x = C()
    x.a = Counted()
    x.b = Counted()
    x.c = Counted()
    self.assertEqual(Counted.counter, 3)
    del x
    support.gc_collect()
    self.assertEqual(Counted.counter, 0)
    class D(C):
        pass
    x = D()
    x.a = Counted()
    x.z = Counted()
    self.assertEqual(Counted.counter, 2)
    del x
    support.gc_collect()
    self.assertEqual(Counted.counter, 0)
    class E(D):
        __slots__ = ['e']
    x = E()
    x.a = Counted()
    x.z = Counted()
    x.e = Counted()
    self.assertEqual(Counted.counter, 3)
    del x
    support.gc_collect()
    self.assertEqual(Counted.counter, 0)
    # Test cyclical leaks [SF bug 519621]
    class F(object):
        __slots__ = ['a', 'b']
    s = F()
    s.a = [Counted(), s]
    self.assertEqual(Counted.counter, 1)
    s = None
    support.gc_collect()
    self.assertEqual(Counted.counter, 0)
    # Test lookup leaks [SF bug 572567]
    if hasattr(gc, 'get_objects'):
        class G(object):
            def __eq__(self, other):
                return False
        g = G()
        orig_objects = len(gc.get_objects())
        for i in range(10):
            g==g
        new_objects = len(gc.get_objects())
        self.assertEqual(orig_objects, new_objects)
    class H(object):
        __slots__ = ['a', 'b']
        def __init__(self):
            self.a = 1
            self.b = 2
        def __del__(self_):
            # 'self_' keeps the outer TestCase 'self' visible for asserts.
            self.assertEqual(self_.a, 1)
            self.assertEqual(self_.b, 2)
    with support.captured_output('stderr') as s:
        h = H()
        del h
    self.assertEqual(s.getvalue(), '')
    class X(object):
        __slots__ = "a"
    with self.assertRaises(AttributeError):
        del X().a
def test_slots_special(self):
    # Testing __dict__ and __weakref__ in __slots__...
    # Naming them as slots opts an otherwise slotted class back into
    # having a dict / weakref support; combining the two via bases works
    # in either order.
    class D(object):
        __slots__ = ["__dict__"]
    a = D()
    self.assertTrue(hasattr(a, "__dict__"))
    self.assertFalse(hasattr(a, "__weakref__"))
    a.foo = 42
    self.assertEqual(a.__dict__, {"foo": 42})
    class W(object):
        __slots__ = ["__weakref__"]
    a = W()
    self.assertTrue(hasattr(a, "__weakref__"))
    self.assertFalse(hasattr(a, "__dict__"))
    try:
        a.foo = 42
    except AttributeError:
        pass
    else:
        self.fail("shouldn't be allowed to set a.foo")
    class C1(W, D):
        __slots__ = []
    a = C1()
    self.assertTrue(hasattr(a, "__dict__"))
    self.assertTrue(hasattr(a, "__weakref__"))
    a.foo = 42
    self.assertEqual(a.__dict__, {"foo": 42})
    class C2(D, W):
        __slots__ = []
    a = C2()
    self.assertTrue(hasattr(a, "__dict__"))
    self.assertTrue(hasattr(a, "__weakref__"))
    a.foo = 42
    self.assertEqual(a.__dict__, {"foo": 42})
def test_slots_descriptor(self):
    # Issue2115: slot descriptors did not correctly check
    # the type of the given object
    # An ABC-registered "virtual" instance passes isinstance() but must
    # still be rejected by the slot descriptor's __set__.
    import abc
    class MyABC(metaclass=abc.ABCMeta):
        __slots__ = "a"
    class Unrelated(object):
        pass
    MyABC.register(Unrelated)
    u = Unrelated()
    self.assertIsInstance(u, MyABC)
    # This used to crash
    self.assertRaises(TypeError, MyABC.a.__set__, u, 3)
def test_dynamics(self):
    # Testing class attribute propagation...
    # Attributes and special methods added to a class after creation must
    # be visible on existing instances and subclasses.
    class D(object):
        pass
    class E(D):
        pass
    class F(D):
        pass
    D.foo = 1
    self.assertEqual(D.foo, 1)
    # Test that dynamic attributes are inherited
    self.assertEqual(E.foo, 1)
    self.assertEqual(F.foo, 1)
    # Test dynamic instances
    class C(object):
        pass
    a = C()
    self.assertFalse(hasattr(a, "foobar"))
    C.foobar = 2
    self.assertEqual(a.foobar, 2)
    C.method = lambda self: 42
    self.assertEqual(a.method(), 42)
    C.__repr__ = lambda self: "C()"
    self.assertEqual(repr(a), "C()")
    C.__int__ = lambda self: 100
    self.assertEqual(int(a), 100)
    self.assertEqual(a.foobar, 2)
    self.assertFalse(hasattr(a, "spam"))
    def mygetattr(self, name):
        if name == "spam":
            return "spam"
        raise AttributeError
    C.__getattr__ = mygetattr
    self.assertEqual(a.spam, "spam")
    a.new = 12
    self.assertEqual(a.new, 12)
    def mysetattr(self, name, value):
        if name == "spam":
            raise AttributeError
        return object.__setattr__(self, name, value)
    C.__setattr__ = mysetattr
    try:
        a.spam = "not spam"
    except AttributeError:
        pass
    else:
        self.fail("expected AttributeError")
    self.assertEqual(a.spam, "spam")
    class D(C):
        pass
    d = D()
    d.foo = 1
    self.assertEqual(d.foo, 1)
    # Test handling of int*seq and seq*int
    class I(int):
        pass
    self.assertEqual("a"*I(2), "aa")
    self.assertEqual(I(2)*"a", "aa")
    self.assertEqual(2*I(3), 6)
    self.assertEqual(I(3)*2, 6)
    self.assertEqual(I(3)*I(2), 6)
    # Test comparison of classes with dynamic metaclasses
    class dynamicmetaclass(type):
        pass
    class someclass(metaclass=dynamicmetaclass):
        pass
    self.assertNotEqual(someclass, object)
def test_errors(self):
    # Testing errors...
    # Illegal base combinations, bad __slots__ values, and an
    # unresolvable metaclass conflict must all raise TypeError.
    try:
        class C(list, dict):
            pass
    except TypeError:
        pass
    else:
        self.fail("inheritance from both list and dict should be illegal")
    try:
        class C(object, None):
            pass
    except TypeError:
        pass
    else:
        self.fail("inheritance from non-type should be illegal")
    # NOTE(review): 'Classic' appears vestigial -- a leftover from the
    # Python-2 version of this test where classic classes existed.
    class Classic:
        pass
    try:
        class C(type(len)):
            pass
    except TypeError:
        pass
    else:
        self.fail("inheritance from CFunction should be illegal")
    try:
        class C(object):
            __slots__ = 1
    except TypeError:
        pass
    else:
        self.fail("__slots__ = 1 should be illegal")
    try:
        class C(object):
            __slots__ = [1]
    except TypeError:
        pass
    else:
        self.fail("__slots__ = [1] should be illegal")
    class M1(type):
        pass
    class M2(type):
        pass
    class A1(object, metaclass=M1):
        pass
    class A2(object, metaclass=M2):
        pass
    try:
        class B(A1, A2):
            pass
    except TypeError:
        pass
    else:
        self.fail("finding the most derived metaclass should have failed")
def test_classmethods(self):
    # Testing class methods...
    # Binding through class vs instance, inheritance rebinding to the
    # subclass, direct __get__ use, super() interaction (SF 535444), and
    # the classmethod object's own attribute dict.
    class C(object):
        def foo(*a): return a
        goo = classmethod(foo)
    c = C()
    self.assertEqual(C.goo(1), (C, 1))
    self.assertEqual(c.goo(1), (C, 1))
    self.assertEqual(c.foo(1), (c, 1))
    class D(C):
        pass
    d = D()
    self.assertEqual(D.goo(1), (D, 1))
    self.assertEqual(d.goo(1), (D, 1))
    self.assertEqual(d.foo(1), (d, 1))
    self.assertEqual(D.foo(d, 1), (d, 1))
    # Test for a specific crash (SF bug 528132)
    def f(cls, arg): return (cls, arg)
    ff = classmethod(f)
    self.assertEqual(ff.__get__(0, int)(42), (int, 42))
    # With no owner argument, __get__ derives the class from the instance.
    self.assertEqual(ff.__get__(0)(42), (int, 42))
    # Test super() with classmethods (SF bug 535444)
    self.assertEqual(C.goo.__self__, C)
    self.assertEqual(D.goo.__self__, D)
    self.assertEqual(super(D,D).goo.__self__, D)
    self.assertEqual(super(D,d).goo.__self__, D)
    self.assertEqual(super(D,D).goo(), (D,))
    self.assertEqual(super(D,d).goo(), (D,))
    # Verify that a non-callable will raise
    meth = classmethod(1).__get__(1)
    self.assertRaises(TypeError, meth)
    # Verify that classmethod() doesn't allow keyword args
    try:
        classmethod(f, kw=1)
    except TypeError:
        pass
    else:
        self.fail("classmethod shouldn't accept keyword args")
    cm = classmethod(f)
    self.assertEqual(cm.__dict__, {})
    cm.x = 42
    self.assertEqual(cm.x, 42)
    self.assertEqual(cm.__dict__, {"x" : 42})
    del cm.x
    self.assertFalse(hasattr(cm, "x"))
@support.impl_detail("the module 'xxsubtype' is internal")
def test_classmethods_in_c(self):
    # Testing C-based class methods...
    # spam.spamlist.classmeth is a METH_CLASS method; calling it bound,
    # on an instance, or raw from the class __dict__ must all pass the
    # correct class, and mismatched first arguments must raise TypeError.
    import xxsubtype as spam
    a = (1, 2, 3)
    d = {'abc': 123}
    x, a1, d1 = spam.spamlist.classmeth(*a, **d)
    self.assertEqual(x, spam.spamlist)
    self.assertEqual(a, a1)
    self.assertEqual(d, d1)
    x, a1, d1 = spam.spamlist().classmeth(*a, **d)
    self.assertEqual(x, spam.spamlist)
    self.assertEqual(a, a1)
    self.assertEqual(d, d1)
    spam_cm = spam.spamlist.__dict__['classmeth']
    x2, a2, d2 = spam_cm(spam.spamlist, *a, **d)
    self.assertEqual(x2, spam.spamlist)
    self.assertEqual(a2, a1)
    self.assertEqual(d2, d1)
    class SubSpam(spam.spamlist): pass
    x2, a2, d2 = spam_cm(SubSpam, *a, **d)
    self.assertEqual(x2, SubSpam)
    self.assertEqual(a2, a1)
    self.assertEqual(d2, d1)
    with self.assertRaises(TypeError):
        spam_cm()
    with self.assertRaises(TypeError):
        spam_cm(spam.spamlist())
    with self.assertRaises(TypeError):
        spam_cm(list)
def test_staticmethods(self):
    # Testing static methods...
    # A staticmethod never receives an implicit first argument, whether
    # looked up on the class, an instance, or a subclass; it also has a
    # writable __dict__ of its own.
    class C(object):
        def foo(*a): return a
        goo = staticmethod(foo)
    c = C()
    self.assertEqual(C.goo(1), (1,))
    self.assertEqual(c.goo(1), (1,))
    self.assertEqual(c.foo(1), (c, 1,))
    class D(C):
        pass
    d = D()
    self.assertEqual(D.goo(1), (1,))
    self.assertEqual(d.goo(1), (1,))
    self.assertEqual(d.foo(1), (d, 1))
    self.assertEqual(D.foo(d, 1), (d, 1))
    # staticmethod accepts any object, callable or not.
    sm = staticmethod(None)
    self.assertEqual(sm.__dict__, {})
    sm.x = 42
    self.assertEqual(sm.x, 42)
    self.assertEqual(sm.__dict__, {"x" : 42})
    del sm.x
    self.assertFalse(hasattr(sm, "x"))
@support.impl_detail("the module 'xxsubtype' is internal")
def test_staticmethods_in_c(self):
    # Testing C-based static methods...
    # spam.spamlist.staticmeth is a METH_STATIC method; both the
    # class-bound and instance-bound calls must forward *args/**kwargs
    # unchanged and receive no implicit first argument (x is None).
    import xxsubtype as spam
    a = (1, 2, 3)
    d = {"abc": 123}
    x, a1, d1 = spam.spamlist.staticmeth(*a, **d)
    self.assertEqual(x, None)
    self.assertEqual(a, a1)
    self.assertEqual(d, d1)
    # BUG FIX: the kwargs result of the instance call was previously
    # unpacked into an unused 'd2', so the assertEqual(d, d1) below
    # re-checked the stale value from the class-bound call above and the
    # instance call's kwargs were never actually verified.
    x, a1, d1 = spam.spamlist().staticmeth(*a, **d)
    self.assertEqual(x, None)
    self.assertEqual(a, a1)
    self.assertEqual(d, d1)
def test_classic(self):
    # Testing classic classes...
    # (In Python 3 these are ordinary classes; the name is historical.)
    class C:
        def foo(*a): return a
        goo = classmethod(foo)
    c = C()
    self.assertEqual(C.goo(1), (C, 1))
    self.assertEqual(c.goo(1), (C, 1))
    self.assertEqual(c.foo(1), (c, 1))
    class D(C):
        pass
    d = D()
    self.assertEqual(D.goo(1), (D, 1))
    self.assertEqual(d.goo(1), (D, 1))
    self.assertEqual(d.foo(1), (d, 1))
    self.assertEqual(D.foo(d, 1), (d, 1))
    class E: # *not* subclassing from C
        foo = C.foo
    self.assertEqual(E().foo.__func__, C.foo) # i.e., unbound
    self.assertTrue(repr(C.foo.__get__(C())).startswith("<bound method "))
def test_compattr(self):
    # Testing computed attributes...
    # A hand-rolled data descriptor (get/set/delete) defined and wired
    # inside the class body; the getter deliberately has the side effect
    # of incrementing the stored value on each read.
    class C(object):
        class computed_attribute(object):
            def __init__(self, get, set=None, delete=None):
                self.__get = get
                self.__set = set
                self.__delete = delete
            def __get__(self, obj, type=None):
                return self.__get(obj)
            def __set__(self, obj, value):
                return self.__set(obj, value)
            def __delete__(self, obj):
                return self.__delete(obj)
        def __init__(self):
            self.__x = 0
        def __get_x(self):
            x = self.__x
            self.__x = x+1
            return x
        def __set_x(self, x):
            self.__x = x
        def __delete_x(self):
            del self.__x
        # The mangled names _C__get_x etc. resolve to the functions above
        # while still inside the class body.
        x = computed_attribute(__get_x, __set_x, __delete_x)
    a = C()
    self.assertEqual(a.x, 0)
    self.assertEqual(a.x, 1)
    a.x = 10
    self.assertEqual(a.x, 10)
    self.assertEqual(a.x, 11)
    del a.x
    self.assertEqual(hasattr(a, 'x'), 0)
def test_newslots(self):
    # Testing __new__ slot override...
    # __new__ sets foo=1 before __init__ adds 2, so a fresh instance
    # ends up with foo == 3; a plain subclass inherits both hooks.
    class C(list):
        def __new__(cls):
            self = list.__new__(cls)
            self.foo = 1
            return self
        def __init__(self):
            self.foo = self.foo + 2
    a = C()
    self.assertEqual(a.foo, 3)
    self.assertEqual(a.__class__, C)
    class D(C):
        pass
    b = D()
    self.assertEqual(b.foo, 3)
    self.assertEqual(b.__class__, D)
def test_altmro(self):
    # Testing mro() and overriding it...
    # A metaclass may customize mro(); devious returns (reversed order,
    # non-class entries, non-sequence) must be rejected with TypeError.
    class A(object):
        def f(self): return "A"
    class B(A):
        pass
    class C(A):
        def f(self): return "C"
    class D(B, C):
        pass
    self.assertEqual(D.mro(), [D, B, C, A, object])
    self.assertEqual(D.__mro__, (D, B, C, A, object))
    self.assertEqual(D().f(), "C")
    class PerverseMetaType(type):
        def mro(cls):
            L = type.mro(cls)
            L.reverse()
            return L
    class X(D,B,C,A, metaclass=PerverseMetaType):
        pass
    self.assertEqual(X.__mro__, (object, A, C, B, D, X))
    self.assertEqual(X().f(), "A")
    try:
        class _metaclass(type):
            def mro(self):
                return [self, dict, object]
        class X(object, metaclass=_metaclass):
            pass
        # In CPython, the class creation above already raises
        # TypeError, as a protection against the fact that
        # instances of X would segfault it.  In other Python
        # implementations it would be ok to let the class X
        # be created, but instead get a clean TypeError on the
        # __setitem__ below.
        x = object.__new__(X)
        x[5] = 6
    except TypeError:
        pass
    else:
        self.fail("devious mro() return not caught")
    try:
        class _metaclass(type):
            def mro(self):
                return [1]
        class X(object, metaclass=_metaclass):
            pass
    except TypeError:
        pass
    else:
        self.fail("non-class mro() return not caught")
    try:
        class _metaclass(type):
            def mro(self):
                return 1
        class X(object, metaclass=_metaclass):
            pass
    except TypeError:
        pass
    else:
        self.fail("non-sequence mro() return not caught")
def test_overloading(self):
    # Testing operator overloading...
    # Attribute and item hooks are exercised, including the slice forms
    # of get/set/delitem (which receive a slice object in Python 3).
    class B(object):
        "Intermediate class because object doesn't have a __setattr__"
    class C(B):
        def __getattr__(self, name):
            if name == "foo":
                return ("getattr", name)
            else:
                raise AttributeError
        def __setattr__(self, name, value):
            if name == "foo":
                self.setattr = (name, value)
            else:
                return B.__setattr__(self, name, value)
        def __delattr__(self, name):
            if name == "foo":
                self.delattr = name
            else:
                return B.__delattr__(self, name)
        def __getitem__(self, key):
            return ("getitem", key)
        def __setitem__(self, key, value):
            self.setitem = (key, value)
        def __delitem__(self, key):
            self.delitem = key
    a = C()
    self.assertEqual(a.foo, ("getattr", "foo"))
    a.foo = 12
    self.assertEqual(a.setattr, ("foo", 12))
    del a.foo
    self.assertEqual(a.delattr, "foo")
    self.assertEqual(a[12], ("getitem", 12))
    a[12] = 21
    self.assertEqual(a.setitem, (12, 21))
    del a[12]
    self.assertEqual(a.delitem, 12)
    self.assertEqual(a[0:10], ("getitem", slice(0, 10)))
    a[0:10] = "foo"
    self.assertEqual(a.setitem, (slice(0, 10), "foo"))
    del a[0:10]
    self.assertEqual(a.delitem, (slice(0, 10)))
def test_methods(self):
    # Testing methods...
    # A plain function taken from another class binds normally; an
    # already-bound method stored as a class attribute stays bound to
    # its original instance (d2.goo() returns c1's x).
    class C(object):
        def __init__(self, x):
            self.x = x
        def foo(self):
            return self.x
    c1 = C(1)
    self.assertEqual(c1.foo(), 1)
    class D(C):
        boo = C.foo
        goo = c1.foo
    d2 = D(2)
    self.assertEqual(d2.foo(), 2)
    self.assertEqual(d2.boo(), 2)
    self.assertEqual(d2.goo(), 1)
    class E(object):
        foo = C.foo
    self.assertEqual(E().foo.__func__, C.foo) # i.e., unbound
    self.assertTrue(repr(C.foo.__get__(C(1))).startswith("<bound method "))
    def test_special_method_lookup(self):
        # The lookup of special methods bypasses __getattr__ and
        # __getattribute__, but they still can be descriptors.
        # Helper callables installed as special-method implementations below.
        def run_context(manager):
            with manager:
                pass
        def iden(self):
            return self
        def hello(self):
            return b"hello"
        def empty_seq(self):
            return []
        def zero(self):
            return 0
        def complex_num(self):
            return 1j
        def stop(self):
            raise StopIteration
        def return_true(self, thing=None):
            return True
        def do_isinstance(obj):
            return isinstance(int, obj)
        def do_issubclass(obj):
            return issubclass(int, obj)
        def do_dict_missing(checker):
            class DictSub(checker.__class__, dict):
                pass
            self.assertEqual(DictSub()["hi"], 4)
        def some_number(self_, key):
            # self_ avoids shadowing the TestCase captured as "self".
            self.assertEqual(key, "hi")
            return 4
        def swallow(*args): pass
        def format_impl(self, spec):
            return "hello"
        # It would be nice to have every special method tested here, but I'm
        # only listing the ones I can remember outside of typeobject.c, since it
        # does it right.
        # Each entry: (special name, callable that triggers it, implementation,
        # attribute names __getattribute__ is allowed to see, extra class dict).
        specials = [
            ("__bytes__", bytes, hello, set(), {}),
            ("__reversed__", reversed, empty_seq, set(), {}),
            ("__length_hint__", list, zero, set(),
             {"__iter__" : iden, "__next__" : stop}),
            ("__sizeof__", sys.getsizeof, zero, set(), {}),
            ("__instancecheck__", do_isinstance, return_true, set(), {}),
            ("__missing__", do_dict_missing, some_number,
             set(("__class__",)), {}),
            ("__subclasscheck__", do_issubclass, return_true,
             set(("__bases__",)), {}),
            ("__enter__", run_context, iden, set(), {"__exit__" : swallow}),
            ("__exit__", run_context, swallow, set(), {"__enter__" : iden}),
            ("__complex__", complex, complex_num, set(), {}),
            ("__format__", format, format_impl, set(), {}),
            ("__floor__", math.floor, zero, set(), {}),
            ("__trunc__", math.trunc, zero, set(), {}),
            ("__trunc__", int, zero, set(), {}),
            ("__ceil__", math.ceil, zero, set(), {}),
            ("__dir__", dir, empty_seq, set(), {}),
            ]
        # Checker fails the test if special-method dispatch ever goes through
        # instance attribute lookup.  "ok" below is the loop variable bound
        # lazily at call time (late binding is deliberate here).
        class Checker(object):
            def __getattr__(self, attr, test=self):  # test=self: capture TestCase
                test.fail("__getattr__ called with {0}".format(attr))
            def __getattribute__(self, attr, test=self):
                if attr not in ok:
                    test.fail("__getattribute__ called with {0}".format(attr))
                return object.__getattribute__(self, attr)
        # A descriptor wrapper: records that type-level lookup invoked __get__.
        class SpecialDescr(object):
            def __init__(self, impl):
                self.impl = impl
            def __get__(self, obj, owner):
                record.append(1)
                return self.impl.__get__(obj, owner)
        class MyException(Exception):
            pass
        # A descriptor whose __get__ always raises; errors must propagate.
        class ErrDescr(object):
            def __get__(self, obj, owner):
                raise MyException
        for name, runner, meth_impl, ok, env in specials:
            # 1) plain function on the type: must work without __getattribute__.
            class X(Checker):
                pass
            for attr, obj in env.items():
                setattr(X, attr, obj)
            setattr(X, name, meth_impl)
            runner(X())
            # 2) descriptor on the type: __get__ must still be honored.
            record = []
            class X(Checker):
                pass
            for attr, obj in env.items():
                setattr(X, attr, obj)
            setattr(X, name, SpecialDescr(meth_impl))
            runner(X())
            self.assertEqual(record, [1], name)
            # 3) a raising descriptor must propagate its exception.
            class X(Checker):
                pass
            for attr, obj in env.items():
                setattr(X, attr, obj)
            setattr(X, name, ErrDescr())
            self.assertRaises(MyException, runner, X())
    def test_specials(self):
        # Testing special operators...
        # Test operators like __hash__ for which a built-in default exists
        # Test the default behavior for static classes
        # NOTE: the checks below deliberately spell out raw operator forms
        # ("not not x", "x != y") rather than the usual assert helpers,
        # because each form exercises a specific special-method path.
        class C(object):
            def __getitem__(self, i):
                if 0 <= i < 10: return i
                raise IndexError
        c1 = C()
        c2 = C()
        self.assertTrue(not not c1) # default truth value of an instance is true
        self.assertNotEqual(id(c1), id(c2))
        hash(c1)
        hash(c2)
        self.assertEqual(c1, c1)
        self.assertTrue(c1 != c2)
        self.assertTrue(not c1 != c1)
        self.assertTrue(not c1 == c2)
        # Note that the module name appears in str/repr, and that varies
        # depending on whether this test is run standalone or from a framework.
        self.assertTrue(str(c1).find('C object at ') >= 0)
        self.assertEqual(str(c1), repr(c1))
        # __contains__ falls back to __getitem__ iteration.
        self.assertNotIn(-1, c1)
        for i in range(10):
            self.assertIn(i, c1)
        self.assertNotIn(10, c1)
        # Test the default behavior for dynamic classes
        class D(object):
            def __getitem__(self, i):
                if 0 <= i < 10: return i
                raise IndexError
        d1 = D()
        d2 = D()
        self.assertTrue(not not d1)
        self.assertNotEqual(id(d1), id(d2))
        hash(d1)
        hash(d2)
        self.assertEqual(d1, d1)
        self.assertNotEqual(d1, d2)
        self.assertTrue(not d1 != d1)
        self.assertTrue(not d1 == d2)
        # Note that the module name appears in str/repr, and that varies
        # depending on whether this test is run standalone or from a framework.
        self.assertTrue(str(d1).find('D object at ') >= 0)
        self.assertEqual(str(d1), repr(d1))
        self.assertNotIn(-1, d1)
        for i in range(10):
            self.assertIn(i, d1)
        self.assertNotIn(10, d1)
        # Test overridden behavior
        # Proxy delegates every special operation to its wrapped value.
        class Proxy(object):
            def __init__(self, x):
                self.x = x
            def __bool__(self):
                return not not self.x
            def __hash__(self):
                return hash(self.x)
            def __eq__(self, other):
                return self.x == other
            def __ne__(self, other):
                return self.x != other
            def __ge__(self, other):
                return self.x >= other
            def __gt__(self, other):
                return self.x > other
            def __le__(self, other):
                return self.x <= other
            def __lt__(self, other):
                return self.x < other
            def __str__(self):
                return "Proxy:%s" % self.x
            def __repr__(self):
                return "Proxy(%r)" % self.x
            def __contains__(self, value):
                return value in self.x
        p0 = Proxy(0)
        p1 = Proxy(1)
        p_1 = Proxy(-1)
        self.assertFalse(p0)
        self.assertTrue(not not p1)
        self.assertEqual(hash(p0), hash(0))
        self.assertEqual(p0, p0)
        self.assertNotEqual(p0, p1)
        self.assertTrue(not p0 != p0)
        self.assertEqual(not p0, p1)
        self.assertTrue(p0 < p1)
        self.assertTrue(p0 <= p1)
        self.assertTrue(p1 > p0)
        self.assertTrue(p1 >= p0)
        self.assertEqual(str(p0), "Proxy:0")
        self.assertEqual(repr(p0), "Proxy(0)")
        p10 = Proxy(range(10))
        self.assertNotIn(-1, p10)
        for i in range(10):
            self.assertIn(i, p10)
        self.assertNotIn(10, p10)
    def test_weakrefs(self):
        # Testing weak references...
        import weakref
        class C(object):
            pass
        c = C()
        r = weakref.ref(c)
        self.assertEqual(r(), c)
        del c
        support.gc_collect()
        self.assertEqual(r(), None)   # referent is gone; ref is dead
        del r
        # A __slots__ class without '__weakref__' cannot be weakly referenced.
        class NoWeak(object):
            __slots__ = ['foo']
        no = NoWeak()
        try:
            weakref.ref(no)
        except TypeError as msg:
            self.assertTrue(str(msg).find("weak reference") >= 0)
        else:
            self.fail("weakref.ref(no) should be illegal")
        # Adding '__weakref__' to __slots__ restores weak referenceability.
        class Weak(object):
            __slots__ = ['foo', '__weakref__']
        yes = Weak()
        r = weakref.ref(yes)
        self.assertEqual(r(), yes)
        del yes
        support.gc_collect()
        self.assertEqual(r(), None)
        del r
    def test_properties(self):
        # Testing property...
        class C(object):
            def getx(self):
                return self.__x
            def setx(self, value):
                self.__x = value
            def delx(self):
                del self.__x
            x = property(getx, setx, delx, doc="I'm the x property.")
        a = C()
        self.assertFalse(hasattr(a, "x"))
        a.x = 42
        self.assertEqual(a._C__x, 42)   # name-mangled backing attribute
        self.assertEqual(a.x, 42)
        del a.x
        self.assertFalse(hasattr(a, "x"))
        self.assertFalse(hasattr(a, "_C__x"))
        # The descriptor protocol can also be driven explicitly.
        C.x.__set__(a, 100)
        self.assertEqual(C.x.__get__(a), 100)
        C.x.__delete__(a)
        self.assertFalse(hasattr(a, "x"))
        raw = C.__dict__['x']
        self.assertIsInstance(raw, property)
        attrs = dir(raw)
        self.assertIn("__doc__", attrs)
        self.assertIn("fget", attrs)
        self.assertIn("fset", attrs)
        self.assertIn("fdel", attrs)
        self.assertEqual(raw.__doc__, "I'm the x property.")
        self.assertTrue(raw.fget is C.__dict__['getx'])
        self.assertTrue(raw.fset is C.__dict__['setx'])
        self.assertTrue(raw.fdel is C.__dict__['delx'])
        # The property object's own attributes are read-only.
        for attr in "__doc__", "fget", "fset", "fdel":
            try:
                setattr(raw, attr, 42)
            except AttributeError as msg:
                if str(msg).find('readonly') < 0:
                    self.fail("when setting readonly attr %r on a property, "
                              "got unexpected AttributeError msg %r" % (attr, str(msg)))
            else:
                self.fail("expected AttributeError from trying to set readonly %r "
                          "attr on a property" % attr)
        # An exception raised inside a property getter must propagate
        # through implicit __getitem__-based iteration.
        class D(object):
            __getitem__ = property(lambda s: 1/0)
        d = D()
        try:
            for i in d:
                str(i)
        except ZeroDivisionError:
            pass
        else:
            self.fail("expected ZeroDivisionError from bad property")
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_properties_doc_attrib(self):
        # property() copies __doc__ from the getter when no explicit doc
        # is given; a setter-only property gets no docstring.
        # Note: the assertions run inside the class body, which can read
        # the enclosing method's "self" (the TestCase).
        class E(object):
            def getter(self):
                "getter method"
                return 0
            def setter(self_, value):
                "setter method"
                pass
            prop = property(getter)
            self.assertEqual(prop.__doc__, "getter method")
            prop2 = property(fset=setter)
            self.assertEqual(prop2.__doc__, None)
    def test_testcapi_no_segfault(self):
        # this segfaulted in 2.5b2
        # Regression check: merely building a property from a C function
        # with a docstring must not crash.  _testcapi is optional.
        try:
            import _testcapi
        except ImportError:
            pass
        else:
            class X(object):
                p = property(_testcapi.test_with_docstring)
    def test_properties_plus(self):
        # Testing property.getter()/.setter()/.deleter() chaining.
        class C(object):
            foo = property(doc="hello")   # doc survives accessor chaining
            @foo.getter
            def foo(self):
                return self._foo
            @foo.setter
            def foo(self, value):
                self._foo = abs(value)
            @foo.deleter
            def foo(self):
                del self._foo
        c = C()
        self.assertEqual(C.foo.__doc__, "hello")
        self.assertFalse(hasattr(c, "foo"))
        c.foo = -42
        self.assertTrue(hasattr(c, '_foo'))
        self.assertEqual(c._foo, 42)
        self.assertEqual(c.foo, 42)
        del c.foo
        self.assertFalse(hasattr(c, '_foo'))
        self.assertFalse(hasattr(c, "foo"))
        # A subclass may replace a single accessor of an inherited property.
        class D(C):
            @C.foo.deleter
            def foo(self):
                try:
                    del self._foo
                except AttributeError:
                    pass
        d = D()
        d.foo = 24
        self.assertEqual(d.foo, 24)
        del d.foo
        del d.foo   # second delete is silenced by D's lenient deleter
        # When the same accessor is declared twice, the last one wins.
        class E(object):
            @property
            def foo(self):
                return self._foo
            @foo.setter
            def foo(self, value):
                raise RuntimeError
            @foo.setter
            def foo(self, value):
                self._foo = abs(value)
            @foo.deleter
            def foo(self, value=None):
                del self._foo
        e = E()
        e.foo = -42
        self.assertEqual(e.foo, 42)
        del e.foo
        class F(E):
            @E.foo.deleter
            def foo(self):
                del self._foo
            @foo.setter
            def foo(self, value):
                self._foo = max(0, value)
        f = F()
        f.foo = -10
        self.assertEqual(f.foo, 0)
        del f.foo
def test_dict_constructors(self):
# Testing dict constructor ...
d = dict()
self.assertEqual(d, {})
d = dict({})
self.assertEqual(d, {})
d = dict({1: 2, 'a': 'b'})
self.assertEqual(d, {1: 2, 'a': 'b'})
self.assertEqual(d, dict(list(d.items())))
self.assertEqual(d, dict(iter(d.items())))
d = dict({'one':1, 'two':2})
self.assertEqual(d, dict(one=1, two=2))
self.assertEqual(d, dict(**d))
self.assertEqual(d, dict({"one": 1}, two=2))
self.assertEqual(d, dict([("two", 2)], one=1))
self.assertEqual(d, dict([("one", 100), ("two", 200)], **d))
self.assertEqual(d, dict(**d))
for badarg in 0, 0, 0j, "0", [0], (0,):
try:
dict(badarg)
except TypeError:
pass
except ValueError:
if badarg == "0":
# It's a sequence, and its elements are also sequences (gotta
# love strings <wink>), but they aren't of length 2, so this
# one seemed better as a ValueError than a TypeError.
pass
else:
self.fail("no TypeError from dict(%r)" % badarg)
else:
self.fail("no TypeError from dict(%r)" % badarg)
try:
dict({}, {})
except TypeError:
pass
else:
self.fail("no TypeError from dict({}, {})")
class Mapping:
# Lacks a .keys() method; will be added later.
dict = {1:2, 3:4, 'a':1j}
try:
dict(Mapping())
except TypeError:
pass
else:
self.fail("no TypeError from dict(incomplete mapping)")
Mapping.keys = lambda self: list(self.dict.keys())
Mapping.__getitem__ = lambda self, i: self.dict[i]
d = dict(Mapping())
self.assertEqual(d, Mapping.dict)
# Init from sequence of iterable objects, each producing a 2-sequence.
class AddressBookEntry:
def __init__(self, first, last):
self.first = first
self.last = last
def __iter__(self):
return iter([self.first, self.last])
d = dict([AddressBookEntry('Tim', 'Warsaw'),
AddressBookEntry('Barry', 'Peters'),
AddressBookEntry('Tim', 'Peters'),
AddressBookEntry('Barry', 'Warsaw')])
self.assertEqual(d, {'Barry': 'Warsaw', 'Tim': 'Peters'})
d = dict(zip(range(4), range(1, 5)))
self.assertEqual(d, dict([(i, i+1) for i in range(4)]))
# Bad sequence lengths.
for bad in [('tooshort',)], [('too', 'long', 'by 1')]:
try:
dict(bad)
except ValueError:
pass
else:
self.fail("no ValueError from dict(%r)" % bad)
def test_dir(self):
# Testing dir() ...
junk = 12
self.assertEqual(dir(), ['junk', 'self'])
del junk
# Just make sure these don't blow up!
for arg in 2, 2, 2j, 2e0, [2], "2", b"2", (2,), {2:2}, type, self.test_dir:
dir(arg)
# Test dir on new-style classes. Since these have object as a
# base class, a lot more gets sucked in.
def interesting(strings):
return [s for s in strings if not s.startswith('_')]
class C(object):
Cdata = 1
def Cmethod(self): pass
cstuff = ['Cdata', 'Cmethod']
self.assertEqual(interesting(dir(C)), cstuff)
c = C()
self.assertEqual(interesting(dir(c)), cstuff)
## self.assertIn('__self__', dir(C.Cmethod))
c.cdata = 2
c.cmethod = lambda self: 0
self.assertEqual(interesting(dir(c)), cstuff + ['cdata', 'cmethod'])
## self.assertIn('__self__', dir(c.Cmethod))
class A(C):
Adata = 1
def Amethod(self): pass
astuff = ['Adata', 'Amethod'] + cstuff
self.assertEqual(interesting(dir(A)), astuff)
## self.assertIn('__self__', dir(A.Amethod))
a = A()
self.assertEqual(interesting(dir(a)), astuff)
a.adata = 42
a.amethod = lambda self: 3
self.assertEqual(interesting(dir(a)), astuff + ['adata', 'amethod'])
## self.assertIn('__self__', dir(a.Amethod))
# Try a module subclass.
class M(type(sys)):
pass
minstance = M("m")
minstance.b = 2
minstance.a = 1
names = [x for x in dir(minstance) if x not in ["__name__", "__doc__"]]
self.assertEqual(names, ['a', 'b'])
class M2(M):
def getdict(self):
return "Not a dict!"
__dict__ = property(getdict)
m2instance = M2("m2")
m2instance.b = 2
m2instance.a = 1
self.assertEqual(m2instance.__dict__, "Not a dict!")
try:
dir(m2instance)
except TypeError:
pass
# Two essentially featureless objects, just inheriting stuff from
# object.
self.assertEqual(dir(NotImplemented), dir(Ellipsis))
# Nasty test case for proxied objects
class Wrapper(object):
def __init__(self, obj):
self.__obj = obj
def __repr__(self):
return "Wrapper(%s)" % repr(self.__obj)
def __getitem__(self, key):
return Wrapper(self.__obj[key])
def __len__(self):
return len(self.__obj)
def __getattr__(self, name):
return Wrapper(getattr(self.__obj, name))
class C(object):
def __getclass(self):
return Wrapper(type(self))
__class__ = property(__getclass)
dir(C()) # This used to segfault
def test_supers(self):
# Testing super...
class A(object):
def meth(self, a):
return "A(%r)" % a
self.assertEqual(A().meth(1), "A(1)")
class B(A):
def __init__(self):
self.__super = super(B, self)
def meth(self, a):
return "B(%r)" % a + self.__super.meth(a)
self.assertEqual(B().meth(2), "B(2)A(2)")
class C(A):
def meth(self, a):
return "C(%r)" % a + self.__super.meth(a)
C._C__super = super(C)
self.assertEqual(C().meth(3), "C(3)A(3)")
class D(C, B):
def meth(self, a):
return "D(%r)" % a + super(D, self).meth(a)
self.assertEqual(D().meth(4), "D(4)C(4)B(4)A(4)")
# Test for subclassing super
class mysuper(super):
def __init__(self, *args):
return super(mysuper, self).__init__(*args)
class E(D):
def meth(self, a):
return "E(%r)" % a + mysuper(E, self).meth(a)
self.assertEqual(E().meth(5), "E(5)D(5)C(5)B(5)A(5)")
class F(E):
def meth(self, a):
s = self.__super # == mysuper(F, self)
return "F(%r)[%s]" % (a, s.__class__.__name__) + s.meth(a)
F._F__super = mysuper(F)
self.assertEqual(F().meth(6), "F(6)[mysuper]E(6)D(6)C(6)B(6)A(6)")
# Make sure certain errors are raised
try:
super(D, 42)
except TypeError:
pass
else:
self.fail("shouldn't allow super(D, 42)")
try:
super(D, C())
except TypeError:
pass
else:
self.fail("shouldn't allow super(D, C())")
try:
super(D).__get__(12)
except TypeError:
pass
else:
self.fail("shouldn't allow super(D).__get__(12)")
try:
super(D).__get__(C())
except TypeError:
pass
else:
self.fail("shouldn't allow super(D).__get__(C())")
# Make sure data descriptors can be overridden and accessed via super
# (new feature in Python 2.3)
class DDbase(object):
def getx(self): return 42
x = property(getx)
class DDsub(DDbase):
def getx(self): return "hello"
x = property(getx)
dd = DDsub()
self.assertEqual(dd.x, "hello")
self.assertEqual(super(DDsub, dd).x, 42)
# Ensure that super() lookup of descriptor from classmethod
# works (SF ID# 743627)
class Base(object):
aProp = property(lambda self: "foo")
class Sub(Base):
@classmethod
def test(klass):
return super(Sub,klass).aProp
self.assertEqual(Sub.test(), Base.aProp)
# Verify that super() doesn't allow keyword args
try:
super(Base, kw=1)
except TypeError:
pass
else:
self.assertEqual("super shouldn't accept keyword args")
    def test_basic_inheritance(self):
        # Testing inheritance from basic types...
        # For each built-in base type, verify that (a) subclass instances
        # behave like the base value, and (b) results of built-in operators
        # come back as the exact base type, not the subclass.
        class hexint(int):
            def __repr__(self):
                return hex(self)
            def __add__(self, other):
                return hexint(int.__add__(self, other))
            # (Note that overriding __radd__ doesn't work,
            # because the int type gets first dibs.)
        self.assertEqual(repr(hexint(7) + 9), "0x10")
        self.assertEqual(repr(hexint(1000) + 7), "0x3ef")
        a = hexint(12345)
        self.assertEqual(a, 12345)
        self.assertEqual(int(a), 12345)
        self.assertTrue(int(a).__class__ is int)
        self.assertEqual(hash(a), hash(12345))
        self.assertTrue((+a).__class__ is int)
        self.assertTrue((a >> 0).__class__ is int)
        self.assertTrue((a << 0).__class__ is int)
        self.assertTrue((hexint(0) << 12).__class__ is int)
        self.assertTrue((hexint(0) >> 12).__class__ is int)
        class octlong(int):
            __slots__ = []
            def __str__(self):
                return oct(self)
            def __add__(self, other):
                return self.__class__(super(octlong, self).__add__(other))
            __radd__ = __add__
        self.assertEqual(str(octlong(3) + 5), "0o10")
        # (Note that overriding __radd__ here only seems to work
        # because the example uses a short int left argument.)
        self.assertEqual(str(5 + octlong(3000)), "0o5675")
        a = octlong(12345)
        self.assertEqual(a, 12345)
        self.assertEqual(int(a), 12345)
        self.assertEqual(hash(a), hash(12345))
        self.assertTrue(int(a).__class__ is int)
        self.assertTrue((+a).__class__ is int)
        self.assertTrue((-a).__class__ is int)
        self.assertTrue((-octlong(0)).__class__ is int)
        self.assertTrue((a >> 0).__class__ is int)
        self.assertTrue((a << 0).__class__ is int)
        self.assertTrue((a - 0).__class__ is int)
        self.assertTrue((a * 1).__class__ is int)
        self.assertTrue((a ** 1).__class__ is int)
        self.assertTrue((a // 1).__class__ is int)
        self.assertTrue((1 * a).__class__ is int)
        self.assertTrue((a | 0).__class__ is int)
        self.assertTrue((a ^ 0).__class__ is int)
        self.assertTrue((a & -1).__class__ is int)
        self.assertTrue((octlong(0) << 12).__class__ is int)
        self.assertTrue((octlong(0) >> 12).__class__ is int)
        self.assertTrue(abs(octlong(0)).__class__ is int)
        # Because octlong overrides __add__, we can't check the absence of +0
        # optimizations using octlong.
        class longclone(int):
            pass
        a = longclone(1)
        self.assertTrue((a + 0).__class__ is int)
        self.assertTrue((0 + a).__class__ is int)
        # Check that negative clones don't segfault
        a = longclone(-1)
        self.assertEqual(a.__dict__, {})
        self.assertEqual(int(a), -1) # verify the conversion copies the sign bit
        class precfloat(float):
            __slots__ = ['prec']
            def __init__(self, value=0.0, prec=12):
                self.prec = int(prec)
            def __repr__(self):
                return "%.*g" % (self.prec, self)
        self.assertEqual(repr(precfloat(1.1)), "1.1")
        a = precfloat(12345)
        self.assertEqual(a, 12345.0)
        self.assertEqual(float(a), 12345.0)
        self.assertTrue(float(a).__class__ is float)
        self.assertEqual(hash(a), hash(12345.0))
        self.assertTrue((+a).__class__ is float)
        class madcomplex(complex):
            def __repr__(self):
                return "%.17gj%+.17g" % (self.imag, self.real)
        a = madcomplex(-3, 4)
        self.assertEqual(repr(a), "4j-3")
        base = complex(-3, 4)
        self.assertEqual(base.__class__, complex)
        self.assertEqual(a, base)
        self.assertEqual(complex(a), base)
        self.assertEqual(complex(a).__class__, complex)
        a = madcomplex(a)  # just trying another form of the constructor
        self.assertEqual(repr(a), "4j-3")
        self.assertEqual(a, base)
        self.assertEqual(complex(a), base)
        self.assertEqual(complex(a).__class__, complex)
        self.assertEqual(hash(a), hash(base))
        self.assertEqual((+a).__class__, complex)
        self.assertEqual((a + 0).__class__, complex)
        self.assertEqual(a + 0, base)
        self.assertEqual((a - 0).__class__, complex)
        self.assertEqual(a - 0, base)
        self.assertEqual((a * 1).__class__, complex)
        self.assertEqual(a * 1, base)
        self.assertEqual((a / 1).__class__, complex)
        self.assertEqual(a / 1, base)
        # A tuple subclass with a memoizing helper method.
        class madtuple(tuple):
            _rev = None
            def rev(self):
                if self._rev is not None:
                    return self._rev
                L = list(self)
                L.reverse()
                self._rev = self.__class__(L)
                return self._rev
        a = madtuple((1,2,3,4,5,6,7,8,9,0))
        self.assertEqual(a, (1,2,3,4,5,6,7,8,9,0))
        self.assertEqual(a.rev(), madtuple((0,9,8,7,6,5,4,3,2,1)))
        self.assertEqual(a.rev().rev(), madtuple((1,2,3,4,5,6,7,8,9,0)))
        for i in range(512):
            t = madtuple(range(i))
            u = t.rev()
            v = u.rev()
            self.assertEqual(v, t)
        a = madtuple((1,2,3,4,5))
        self.assertEqual(tuple(a), (1,2,3,4,5))
        self.assertTrue(tuple(a).__class__ is tuple)
        self.assertEqual(hash(a), hash((1,2,3,4,5)))
        self.assertTrue(a[:].__class__ is tuple)
        self.assertTrue((a * 1).__class__ is tuple)
        self.assertTrue((a * 0).__class__ is tuple)
        self.assertTrue((a + ()).__class__ is tuple)
        a = madtuple(())
        self.assertEqual(tuple(a), ())
        self.assertTrue(tuple(a).__class__ is tuple)
        self.assertTrue((a + a).__class__ is tuple)
        self.assertTrue((a * 0).__class__ is tuple)
        self.assertTrue((a * 1).__class__ is tuple)
        self.assertTrue((a * 2).__class__ is tuple)
        self.assertTrue(a[:].__class__ is tuple)
        # A str subclass: every string operation must return plain str.
        class madstring(str):
            _rev = None
            def rev(self):
                if self._rev is not None:
                    return self._rev
                L = list(self)
                L.reverse()
                self._rev = self.__class__("".join(L))
                return self._rev
        s = madstring("abcdefghijklmnopqrstuvwxyz")
        self.assertEqual(s, "abcdefghijklmnopqrstuvwxyz")
        self.assertEqual(s.rev(), madstring("zyxwvutsrqponmlkjihgfedcba"))
        self.assertEqual(s.rev().rev(), madstring("abcdefghijklmnopqrstuvwxyz"))
        for i in range(256):
            s = madstring("".join(map(chr, range(i))))
            t = s.rev()
            u = t.rev()
            self.assertEqual(u, s)
        s = madstring("12345")
        self.assertEqual(str(s), "12345")
        self.assertTrue(str(s).__class__ is str)
        base = "\x00" * 5
        s = madstring(base)
        self.assertEqual(s, base)
        self.assertEqual(str(s), base)
        self.assertTrue(str(s).__class__ is str)
        self.assertEqual(hash(s), hash(base))
        self.assertEqual({s: 1}[base], 1)
        self.assertEqual({base: 1}[s], 1)
        self.assertTrue((s + "").__class__ is str)
        self.assertEqual(s + "", base)
        self.assertTrue(("" + s).__class__ is str)
        self.assertEqual("" + s, base)
        self.assertTrue((s * 0).__class__ is str)
        self.assertEqual(s * 0, "")
        self.assertTrue((s * 1).__class__ is str)
        self.assertEqual(s * 1, base)
        self.assertTrue((s * 2).__class__ is str)
        self.assertEqual(s * 2, base + base)
        self.assertTrue(s[:].__class__ is str)
        self.assertEqual(s[:], base)
        self.assertTrue(s[0:0].__class__ is str)
        self.assertEqual(s[0:0], "")
        self.assertTrue(s.strip().__class__ is str)
        self.assertEqual(s.strip(), base)
        self.assertTrue(s.lstrip().__class__ is str)
        self.assertEqual(s.lstrip(), base)
        self.assertTrue(s.rstrip().__class__ is str)
        self.assertEqual(s.rstrip(), base)
        identitytab = {}
        self.assertTrue(s.translate(identitytab).__class__ is str)
        self.assertEqual(s.translate(identitytab), base)
        self.assertTrue(s.replace("x", "x").__class__ is str)
        self.assertEqual(s.replace("x", "x"), base)
        self.assertTrue(s.ljust(len(s)).__class__ is str)
        self.assertEqual(s.ljust(len(s)), base)
        self.assertTrue(s.rjust(len(s)).__class__ is str)
        self.assertEqual(s.rjust(len(s)), base)
        self.assertTrue(s.center(len(s)).__class__ is str)
        self.assertEqual(s.center(len(s)), base)
        self.assertTrue(s.lower().__class__ is str)
        self.assertEqual(s.lower(), base)
        # Same exercise for a second str subclass (named for the former
        # unicode type).
        class madunicode(str):
            _rev = None
            def rev(self):
                if self._rev is not None:
                    return self._rev
                L = list(self)
                L.reverse()
                self._rev = self.__class__("".join(L))
                return self._rev
        u = madunicode("ABCDEF")
        self.assertEqual(u, "ABCDEF")
        self.assertEqual(u.rev(), madunicode("FEDCBA"))
        self.assertEqual(u.rev().rev(), madunicode("ABCDEF"))
        base = "12345"
        u = madunicode(base)
        self.assertEqual(str(u), base)
        self.assertTrue(str(u).__class__ is str)
        self.assertEqual(hash(u), hash(base))
        self.assertEqual({u: 1}[base], 1)
        self.assertEqual({base: 1}[u], 1)
        self.assertTrue(u.strip().__class__ is str)
        self.assertEqual(u.strip(), base)
        self.assertTrue(u.lstrip().__class__ is str)
        self.assertEqual(u.lstrip(), base)
        self.assertTrue(u.rstrip().__class__ is str)
        self.assertEqual(u.rstrip(), base)
        self.assertTrue(u.replace("x", "x").__class__ is str)
        self.assertEqual(u.replace("x", "x"), base)
        self.assertTrue(u.replace("xy", "xy").__class__ is str)
        self.assertEqual(u.replace("xy", "xy"), base)
        self.assertTrue(u.center(len(u)).__class__ is str)
        self.assertEqual(u.center(len(u)), base)
        self.assertTrue(u.ljust(len(u)).__class__ is str)
        self.assertEqual(u.ljust(len(u)), base)
        self.assertTrue(u.rjust(len(u)).__class__ is str)
        self.assertEqual(u.rjust(len(u)), base)
        self.assertTrue(u.lower().__class__ is str)
        self.assertEqual(u.lower(), base)
        self.assertTrue(u.upper().__class__ is str)
        self.assertEqual(u.upper(), base)
        self.assertTrue(u.capitalize().__class__ is str)
        self.assertEqual(u.capitalize(), base)
        self.assertTrue(u.title().__class__ is str)
        self.assertEqual(u.title(), base)
        self.assertTrue((u + "").__class__ is str)
        self.assertEqual(u + "", base)
        self.assertTrue(("" + u).__class__ is str)
        self.assertEqual("" + u, base)
        self.assertTrue((u * 0).__class__ is str)
        self.assertEqual(u * 0, "")
        self.assertTrue((u * 1).__class__ is str)
        self.assertEqual(u * 1, base)
        self.assertTrue((u * 2).__class__ is str)
        self.assertEqual(u * 2, base + base)
        self.assertTrue(u[:].__class__ is str)
        self.assertEqual(u[:], base)
        self.assertTrue(u[0:0].__class__ is str)
        self.assertEqual(u[0:0], "")
        # A list subclass inherits all mutation behavior unchanged.
        class sublist(list):
            pass
        a = sublist(range(5))
        self.assertEqual(a, list(range(5)))
        a.append("hello")
        self.assertEqual(a, list(range(5)) + ["hello"])
        a[5] = 5
        self.assertEqual(a, list(range(6)))
        a.extend(range(6, 20))
        self.assertEqual(a, list(range(20)))
        a[-5:] = []
        self.assertEqual(a, list(range(15)))
        del a[10:15]
        self.assertEqual(len(a), 10)
        self.assertEqual(a, list(range(10)))
        self.assertEqual(list(a), list(range(10)))
        self.assertEqual(a[0], 0)
        self.assertEqual(a[9], 9)
        self.assertEqual(a[-10], 0)
        self.assertEqual(a[-1], 9)
        self.assertEqual(a[:5], list(range(5)))
        # The file-subclass test below predates Python 3 (the built-in
        # "file" type no longer exists); kept for historical reference.
##        class CountedInput(file):
##            """Counts lines read by self.readline().
##
##            self.lineno is the 0-based ordinal of the last line read, up to
##            a maximum of one greater than the number of lines in the file.
##
##            self.ateof is true if and only if the final "" line has been read,
##            at which point self.lineno stops incrementing, and further calls
##            to readline() continue to return "".
##            """
##
##            lineno = 0
##            ateof = 0
##            def readline(self):
##                if self.ateof:
##                    return ""
##                s = file.readline(self)
##                # Next line works too.
##                # s = super(CountedInput, self).readline()
##                self.lineno += 1
##                if s == "":
##                    self.ateof = 1
##                return s
##
##        f = file(name=support.TESTFN, mode='w')
##        lines = ['a\n', 'b\n', 'c\n']
##        try:
##            f.writelines(lines)
##            f.close()
##            f = CountedInput(support.TESTFN)
##            for (i, expected) in zip(range(1, 5) + [4], lines + 2 * [""]):
##                got = f.readline()
##                self.assertEqual(expected, got)
##                self.assertEqual(f.lineno, i)
##                self.assertEqual(f.ateof, (i > len(lines)))
##            f.close()
##        finally:
##            try:
##                f.close()
##            except:
##                pass
##            support.unlink(support.TESTFN)
def test_keywords(self):
# Testing keyword args to basic type constructors ...
self.assertEqual(int(x=1), 1)
self.assertEqual(float(x=2), 2.0)
self.assertEqual(int(x=3), 3)
self.assertEqual(complex(imag=42, real=666), complex(666, 42))
self.assertEqual(str(object=500), '500')
self.assertEqual(str(object=b'abc', errors='strict'), 'abc')
self.assertEqual(tuple(sequence=range(3)), (0, 1, 2))
self.assertEqual(list(sequence=(0, 1, 2)), list(range(3)))
# note: as of Python 2.3, dict() no longer has an "items" keyword arg
for constructor in (int, float, int, complex, str, str,
tuple, list):
try:
constructor(bogus_keyword_arg=1)
except TypeError:
pass
else:
self.fail("expected TypeError from bogus keyword argument to %r"
% constructor)
    def test_str_subclass_as_dict_key(self):
        # Testing a str subclass used as a dict key.
        class cistr(str):
            """Subclass of str that computes __eq__ case-insensitively.

            Also computes a hash code of the string in canonical form.
            """
            def __init__(self, value):
                self.canonical = value.lower()
                self.hashcode = hash(self.canonical)
            def __eq__(self, other):
                if not isinstance(other, cistr):
                    other = cistr(other)
                return self.canonical == other.canonical
            def __hash__(self):
                # Equal-ignoring-case strings must hash equally for dict use.
                return self.hashcode
        self.assertEqual(cistr('ABC'), 'abc')
        self.assertEqual('aBc', cistr('ABC'))
        self.assertEqual(str(cistr('ABC')), 'ABC')
        d = {cistr('one'): 1, cistr('two'): 2, cistr('tHree'): 3}
        self.assertEqual(d[cistr('one')], 1)
        self.assertEqual(d[cistr('tWo')], 2)
        self.assertEqual(d[cistr('THrEE')], 3)
        self.assertIn(cistr('ONe'), d)
        self.assertEqual(d.get(cistr('thrEE')), 3)
def test_classic_comparisons(self):
# Testing classic comparisons...
class classic:
pass
for base in (classic, int, object):
class C(base):
def __init__(self, value):
self.value = int(value)
def __eq__(self, other):
if isinstance(other, C):
return self.value == other.value
if isinstance(other, int) or isinstance(other, int):
return self.value == other
return NotImplemented
def __ne__(self, other):
if isinstance(other, C):
return self.value != other.value
if isinstance(other, int) or isinstance(other, int):
return self.value != other
return NotImplemented
def __lt__(self, other):
if isinstance(other, C):
return self.value < other.value
if isinstance(other, int) or isinstance(other, int):
return self.value < other
return NotImplemented
def __le__(self, other):
if isinstance(other, C):
return self.value <= other.value
if isinstance(other, int) or isinstance(other, int):
return self.value <= other
return NotImplemented
def __gt__(self, other):
if isinstance(other, C):
return self.value > other.value
if isinstance(other, int) or isinstance(other, int):
return self.value > other
return NotImplemented
def __ge__(self, other):
if isinstance(other, C):
return self.value >= other.value
if isinstance(other, int) or isinstance(other, int):
return self.value >= other
return NotImplemented
c1 = C(1)
c2 = C(2)
c3 = C(3)
self.assertEqual(c1, 1)
c = {1: c1, 2: c2, 3: c3}
for x in 1, 2, 3:
for y in 1, 2, 3:
for op in "<", "<=", "==", "!=", ">", ">=":
self.assertTrue(eval("c[x] %s c[y]" % op) ==
eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
self.assertTrue(eval("c[x] %s y" % op) ==
eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
self.assertTrue(eval("x %s c[y]" % op) ==
eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
def test_rich_comparisons(self):
# Testing rich comparisons...
class Z(complex):
pass
z = Z(1)
self.assertEqual(z, 1+0j)
self.assertEqual(1+0j, z)
class ZZ(complex):
def __eq__(self, other):
try:
return abs(self - other) <= 1e-6
except:
return NotImplemented
zz = ZZ(1.0000003)
self.assertEqual(zz, 1+0j)
self.assertEqual(1+0j, zz)
class classic:
pass
for base in (classic, int, object, list):
class C(base):
def __init__(self, value):
self.value = int(value)
def __cmp__(self_, other):
self.fail("shouldn't call __cmp__")
def __eq__(self, other):
if isinstance(other, C):
return self.value == other.value
if isinstance(other, int) or isinstance(other, int):
return self.value == other
return NotImplemented
def __ne__(self, other):
if isinstance(other, C):
return self.value != other.value
if isinstance(other, int) or isinstance(other, int):
return self.value != other
return NotImplemented
def __lt__(self, other):
if isinstance(other, C):
return self.value < other.value
if isinstance(other, int) or isinstance(other, int):
return self.value < other
return NotImplemented
def __le__(self, other):
if isinstance(other, C):
return self.value <= other.value
if isinstance(other, int) or isinstance(other, int):
return self.value <= other
return NotImplemented
def __gt__(self, other):
if isinstance(other, C):
return self.value > other.value
if isinstance(other, int) or isinstance(other, int):
return self.value > other
return NotImplemented
def __ge__(self, other):
if isinstance(other, C):
return self.value >= other.value
if isinstance(other, int) or isinstance(other, int):
return self.value >= other
return NotImplemented
c1 = C(1)
c2 = C(2)
c3 = C(3)
self.assertEqual(c1, 1)
c = {1: c1, 2: c2, 3: c3}
for x in 1, 2, 3:
for y in 1, 2, 3:
for op in "<", "<=", "==", "!=", ">", ">=":
self.assertTrue(eval("c[x] %s c[y]" % op) == eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
self.assertTrue(eval("c[x] %s y" % op) == eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
self.assertTrue(eval("x %s c[y]" % op) == eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
    def test_descrdoc(self):
        # Testing descriptor doc strings...
        # NOTE(review): the exact docstring texts asserted here are
        # CPython-version dependent -- confirm against the running build.
        from _io import FileIO
        def check(descr, what):
            self.assertEqual(descr.__doc__, what)
        check(FileIO.closed, "True if the file is closed") # getset descriptor
        check(complex.real, "the real part of a complex number") # member descriptor
    def test_doc_descriptor(self):
        # Testing __doc__ descriptor...
        # SF bug 542984
        # __doc__ may be an arbitrary descriptor; both class-level and
        # instance-level access must go through its __get__.
        class DocDescr(object):
            def __get__(self, object, otype):
                if object:
                    object = object.__class__.__name__ + ' instance'
                if otype:
                    otype = otype.__name__
                return 'object=%s; type=%s' % (object, otype)
        class OldClass:
            __doc__ = DocDescr()
        class NewClass(object):
            __doc__ = DocDescr()
        self.assertEqual(OldClass.__doc__, 'object=None; type=OldClass')
        self.assertEqual(OldClass().__doc__, 'object=OldClass instance; type=OldClass')
        self.assertEqual(NewClass.__doc__, 'object=None; type=NewClass')
        self.assertEqual(NewClass().__doc__, 'object=NewClass instance; type=NewClass')
    def test_set_class(self):
        """Assignment to instance __class__.

        Allowed only between heap classes with compatible layouts
        (same slot names/order, same presence of __dict__/__weakref__);
        builtins, immutable types, and mismatched-slot classes must
        refuse both assignment and deletion of __class__.
        """
        # Testing __class__ assignment...
        class C(object): pass
        class D(object): pass
        class E(object): pass
        class F(D, E): pass
        for cls in C, D, E, F:
            for cls2 in C, D, E, F:
                x = cls()
                x.__class__ = cls2
                self.assertTrue(x.__class__ is cls2)
                x.__class__ = cls
                self.assertTrue(x.__class__ is cls)
        def cant(x, C):
            # Assert that both setting and deleting x.__class__ fail.
            try:
                x.__class__ = C
            except TypeError:
                pass
            else:
                self.fail("shouldn't allow %r.__class__ = %r" % (x, C))
            try:
                delattr(x, "__class__")
            except (TypeError, AttributeError):
                pass
            else:
                self.fail("shouldn't allow del %r.__class__" % x)
        cant(C(), list)
        cant(list(), C)
        cant(C(), 1)
        cant(C(), object)
        cant(object(), list)
        cant(list(), object)
        class Int(int): __slots__ = []
        cant(2, Int)
        cant(Int(), int)
        cant(True, int)
        cant(2, bool)
        o = object()
        cant(o, type(1))
        cant(o, type(None))
        del o
        # Same slot names in the same textual order (G vs I) are compatible;
        # differing names, counts, or extra __dict__/__weakref__ are not.
        class G(object):
            __slots__ = ["a", "b"]
        class H(object):
            __slots__ = ["b", "a"]
        class I(object):
            __slots__ = ["a", "b"]
        class J(object):
            __slots__ = ["c", "b"]
        class K(object):
            __slots__ = ["a", "b", "d"]
        class L(H):
            __slots__ = ["e"]
        class M(I):
            __slots__ = ["e"]
        class N(J):
            __slots__ = ["__weakref__"]
        class P(J):
            __slots__ = ["__dict__"]
        class Q(J):
            pass
        class R(J):
            __slots__ = ["__dict__", "__weakref__"]
        for cls, cls2 in ((G, H), (G, I), (I, H), (Q, R), (R, Q)):
            x = cls()
            x.a = 1
            x.__class__ = cls2
            self.assertTrue(x.__class__ is cls2,
                   "assigning %r as __class__ for %r silently failed" % (cls2, x))
            self.assertEqual(x.a, 1)
            x.__class__ = cls
            self.assertTrue(x.__class__ is cls,
                   "assigning %r as __class__ for %r silently failed" % (cls, x))
            self.assertEqual(x.a, 1)
        for cls in G, J, K, L, M, N, P, R, list, Int:
            for cls2 in G, J, K, L, M, N, P, R, list, Int:
                if cls is cls2:
                    continue
                cant(cls(), cls2)
        # Issue5283: when __class__ changes in __del__, the wrong
        # type gets DECREF'd.
        class O(object):
            pass
        class A(object):
            def __del__(self):
                self.__class__ = O
        l = [A() for x in range(100)]
        del l
    def test_set_dict(self):
        """Assignment to instance __dict__.

        Plain instances accept a dict (and allow deletion); classes and
        modules expose read-only __dict__; exceptions allow replacement
        but deletion behavior is implementation-dependent.
        """
        # Testing __dict__ assignment...
        class C(object): pass
        a = C()
        a.__dict__ = {'b': 1}
        self.assertEqual(a.b, 1)
        def cant(x, dict):
            # Assert that assigning the given object as x.__dict__ fails.
            try:
                x.__dict__ = dict
            except (AttributeError, TypeError):
                pass
            else:
                self.fail("shouldn't allow %r.__dict__ = %r" % (x, dict))
        cant(a, None)
        cant(a, [])
        cant(a, 1)
        del a.__dict__ # Deleting __dict__ is allowed
        class Base(object):
            pass
        def verify_dict_readonly(x):
            """
            x has to be an instance of a class inheriting from Base.
            """
            cant(x, {})
            try:
                del x.__dict__
            except (AttributeError, TypeError):
                pass
            else:
                self.fail("shouldn't allow del %r.__dict__" % x)
            dict_descr = Base.__dict__["__dict__"]
            try:
                dict_descr.__set__(x, {})
            except (AttributeError, TypeError):
                pass
            else:
                self.fail("dict_descr allowed access to %r's dict" % x)
        # Classes don't allow __dict__ assignment and have readonly dicts
        class Meta1(type, Base):
            pass
        class Meta2(Base, type):
            pass
        class D(object, metaclass=Meta1):
            pass
        class E(object, metaclass=Meta2):
            pass
        for cls in C, D, E:
            verify_dict_readonly(cls)
            class_dict = cls.__dict__
            try:
                class_dict["spam"] = "eggs"
            except TypeError:
                pass
            else:
                self.fail("%r's __dict__ can be modified" % cls)
        # Modules also disallow __dict__ assignment
        class Module1(types.ModuleType, Base):
            pass
        class Module2(Base, types.ModuleType):
            pass
        for ModuleType in Module1, Module2:
            mod = ModuleType("spam")
            verify_dict_readonly(mod)
            mod.__dict__["spam"] = "eggs"
        # Exception's __dict__ can be replaced, but not deleted
        # (at least not any more than regular exception's __dict__ can
        # be deleted; on CPython it is not the case, whereas on PyPy they
        # can, just like any other new-style instance's __dict__.)
        def can_delete_dict(e):
            # Probe whether deleting e.__dict__ succeeds on this interpreter.
            try:
                del e.__dict__
            except (TypeError, AttributeError):
                return False
            else:
                return True
        class Exception1(Exception, Base):
            pass
        class Exception2(Base, Exception):
            pass
        for ExceptionType in Exception, Exception1, Exception2:
            e = ExceptionType()
            e.__dict__ = {"a": 1}
            self.assertEqual(e.a, 1)
            self.assertEqual(can_delete_dict(e), can_delete_dict(ValueError()))
    def test_pickles(self):
        """Pickling and deepcopy of new-style classes and their instances.

        The classes are bound as module globals (required so pickle can
        find them by name) and exercise __getnewargs__, __getstate__ /
        __setstate__, and classic/new-style mixed inheritance.
        """
        # Testing pickling and copying new-style classes and objects...
        import pickle
        def sorteditems(d):
            # Deterministic ordering for dict comparison.
            L = list(d.items())
            L.sort()
            return L
        global C
        class C(object):
            def __init__(self, a, b):
                super(C, self).__init__()
                self.a = a
                self.b = b
            def __repr__(self):
                return "C(%r, %r)" % (self.a, self.b)
        global C1
        class C1(list):
            def __new__(cls, a, b):
                return super(C1, cls).__new__(cls)
            def __getnewargs__(self):
                return (self.a, self.b)
            def __init__(self, a, b):
                self.a = a
                self.b = b
            def __repr__(self):
                return "C1(%r, %r)<%r>" % (self.a, self.b, list(self))
        global C2
        class C2(int):
            def __new__(cls, a, b, val=0):
                return super(C2, cls).__new__(cls, val)
            def __getnewargs__(self):
                return (self.a, self.b, int(self))
            def __init__(self, a, b, val=0):
                self.a = a
                self.b = b
            def __repr__(self):
                return "C2(%r, %r)<%r>" % (self.a, self.b, int(self))
        global C3
        class C3(object):
            def __init__(self, foo):
                self.foo = foo
            def __getstate__(self):
                return self.foo
            def __setstate__(self, foo):
                self.foo = foo
        global C4classic, C4
        class C4classic: # classic
            pass
        class C4(C4classic, object): # mixed inheritance
            pass
        for bin in 0, 1:
            # Round-trip classes themselves (must come back identical) and
            # instances (must preserve class, state, and repr).
            for cls in C, C1, C2:
                s = pickle.dumps(cls, bin)
                cls2 = pickle.loads(s)
                self.assertTrue(cls2 is cls)
            a = C1(1, 2); a.append(42); a.append(24)
            b = C2("hello", "world", 42)
            s = pickle.dumps((a, b), bin)
            x, y = pickle.loads(s)
            self.assertEqual(x.__class__, a.__class__)
            self.assertEqual(sorteditems(x.__dict__), sorteditems(a.__dict__))
            self.assertEqual(y.__class__, b.__class__)
            self.assertEqual(sorteditems(y.__dict__), sorteditems(b.__dict__))
            self.assertEqual(repr(x), repr(a))
            self.assertEqual(repr(y), repr(b))
            # Test for __getstate__ and __setstate__ on new style class
            u = C3(42)
            s = pickle.dumps(u, bin)
            v = pickle.loads(s)
            self.assertEqual(u.__class__, v.__class__)
            self.assertEqual(u.foo, v.foo)
            # Test for picklability of hybrid class
            u = C4()
            u.foo = 42
            s = pickle.dumps(u, bin)
            v = pickle.loads(s)
            self.assertEqual(u.__class__, v.__class__)
            self.assertEqual(u.foo, v.foo)
        # Testing copy.deepcopy()
        import copy
        for cls in C, C1, C2:
            cls2 = copy.deepcopy(cls)
            self.assertTrue(cls2 is cls)
        a = C1(1, 2); a.append(42); a.append(24)
        b = C2("hello", "world", 42)
        x, y = copy.deepcopy((a, b))
        self.assertEqual(x.__class__, a.__class__)
        self.assertEqual(sorteditems(x.__dict__), sorteditems(a.__dict__))
        self.assertEqual(y.__class__, b.__class__)
        self.assertEqual(sorteditems(y.__dict__), sorteditems(b.__dict__))
        self.assertEqual(repr(x), repr(a))
        self.assertEqual(repr(y), repr(b))
def test_pickle_slots(self):
# Testing pickling of classes with __slots__ ...
import pickle
# Pickling of classes with __slots__ but without __getstate__ should fail
# (if using protocol 0 or 1)
global B, C, D, E
class B(object):
pass
for base in [object, B]:
class C(base):
__slots__ = ['a']
class D(C):
pass
try:
pickle.dumps(C(), 0)
except TypeError:
pass
else:
self.fail("should fail: pickle C instance - %s" % base)
try:
pickle.dumps(C(), 0)
except TypeError:
pass
else:
self.fail("should fail: pickle D instance - %s" % base)
# Give C a nice generic __getstate__ and __setstate__
class C(base):
__slots__ = ['a']
def __getstate__(self):
try:
d = self.__dict__.copy()
except AttributeError:
d = {}
for cls in self.__class__.__mro__:
for sn in cls.__dict__.get('__slots__', ()):
try:
d[sn] = getattr(self, sn)
except AttributeError:
pass
return d
def __setstate__(self, d):
for k, v in list(d.items()):
setattr(self, k, v)
class D(C):
pass
# Now it should work
x = C()
y = pickle.loads(pickle.dumps(x))
self.assertEqual(hasattr(y, 'a'), 0)
x.a = 42
y = pickle.loads(pickle.dumps(x))
self.assertEqual(y.a, 42)
x = D()
x.a = 42
x.b = 100
y = pickle.loads(pickle.dumps(x))
self.assertEqual(y.a + y.b, 142)
# A subclass that adds a slot should also work
class E(C):
__slots__ = ['b']
x = E()
x.a = 42
x.b = "foo"
y = pickle.loads(pickle.dumps(x))
self.assertEqual(y.a, x.a)
self.assertEqual(y.b, x.b)
    def test_binary_operator_override(self):
        """__add__/__radd__/__pow__/__rpow__ overrides on an int subclass
        are honored for both operand positions and for three-arg pow()."""
        # Testing overrides of binary operations...
        class I(int):
            def __repr__(self):
                return "I(%r)" % int(self)
            def __add__(self, other):
                return I(int(self) + int(other))
            __radd__ = __add__
            def __pow__(self, other, mod=None):
                if mod is None:
                    return I(pow(int(self), int(other)))
                else:
                    return I(pow(int(self), int(other), int(mod)))
            def __rpow__(self, other, mod=None):
                if mod is None:
                    return I(pow(int(other), int(self), mod))
                else:
                    return I(pow(int(other), int(self), int(mod)))
        self.assertEqual(repr(I(1) + I(2)), "I(3)")
        self.assertEqual(repr(I(1) + 2), "I(3)")
        self.assertEqual(repr(1 + I(2)), "I(3)")
        self.assertEqual(repr(I(2) ** I(3)), "I(8)")
        self.assertEqual(repr(2 ** I(3)), "I(8)")
        self.assertEqual(repr(I(2) ** 3), "I(8)")
        self.assertEqual(repr(pow(I(2), I(3), I(5))), "I(3)")
        # NOTE(review): S is defined but never exercised below — presumably
        # vestigial; confirm whether assertions for it were dropped.
        class S(str):
            def __eq__(self, other):
                return self.lower() == other.lower()
    def test_subclass_propagation(self):
        """Changes to slot methods (__hash__, __getattribute__, __getattr__)
        on base classes must propagate to existing subclasses and their
        instances, following the MRO (D, B, C, A), and removal must restore
        the previous resolution."""
        # Testing propagation of slot functions to subclasses...
        class A(object):
            pass
        class B(A):
            pass
        class C(A):
            pass
        class D(B, C):
            pass
        d = D()
        orig_hash = hash(d) # related to id(d) in platform-dependent ways
        A.__hash__ = lambda self: 42
        self.assertEqual(hash(d), 42)
        C.__hash__ = lambda self: 314
        self.assertEqual(hash(d), 314)
        B.__hash__ = lambda self: 144
        self.assertEqual(hash(d), 144)
        D.__hash__ = lambda self: 100
        self.assertEqual(hash(d), 100)
        D.__hash__ = None
        self.assertRaises(TypeError, hash, d)
        del D.__hash__
        self.assertEqual(hash(d), 144)
        B.__hash__ = None
        self.assertRaises(TypeError, hash, d)
        del B.__hash__
        self.assertEqual(hash(d), 314)
        C.__hash__ = None
        self.assertRaises(TypeError, hash, d)
        del C.__hash__
        self.assertEqual(hash(d), 42)
        A.__hash__ = None
        self.assertRaises(TypeError, hash, d)
        del A.__hash__
        self.assertEqual(hash(d), orig_hash)
        d.foo = 42
        d.bar = 42
        self.assertEqual(d.foo, 42)
        self.assertEqual(d.bar, 42)
        def __getattribute__(self, name):
            # Installed on A below: shadows "foo" for every lookup.
            if name == "foo":
                return 24
            return object.__getattribute__(self, name)
        A.__getattribute__ = __getattribute__
        self.assertEqual(d.foo, 24)
        self.assertEqual(d.bar, 42)
        def __getattr__(self, name):
            # Installed on B below: fallback only when normal lookup fails.
            if name in ("spam", "foo", "bar"):
                return "hello"
            raise AttributeError(name)
        B.__getattr__ = __getattr__
        self.assertEqual(d.spam, "hello")
        self.assertEqual(d.foo, 24)
        self.assertEqual(d.bar, 42)
        del A.__getattribute__
        self.assertEqual(d.foo, 42)
        del d.foo
        self.assertEqual(d.foo, "hello")
        self.assertEqual(d.bar, 42)
        del B.__getattr__
        try:
            d.foo
        except AttributeError:
            pass
        else:
            self.fail("d.foo should be undefined now")
        # Test a nasty bug in recurse_down_subclasses()
        class A(object):
            pass
        class B(A):
            pass
        del B
        support.gc_collect()
        A.__setitem__ = lambda *a: None # crash
    def test_buffer_inheritance(self):
        """The buffer interface is inherited by bytes subclasses but must
        not appear on int subclasses (SF #470040)."""
        # Testing that buffer interface is inherited ...
        import binascii
        # SF bug [#470040] ParseTuple t# vs subclasses.
        class MyBytes(bytes):
            pass
        base = b'abc'
        m = MyBytes(base)
        # b2a_hex uses the buffer interface to get its argument's value, via
        # PyArg_ParseTuple 't#' code.
        self.assertEqual(binascii.b2a_hex(m), binascii.b2a_hex(base))
        class MyInt(int):
            pass
        m = MyInt(42)
        try:
            binascii.b2a_hex(m)
            self.fail('subclass of int should not have a buffer interface')
        except TypeError:
            pass
    def test_str_of_str_subclass(self):
        """str() and repr() of a str subclass invoke the overridden
        __str__/__repr__, including via print()."""
        # Testing __str__ defined in subclass of str ...
        import binascii
        import io
        class octetstring(str):
            def __str__(self):
                # Hex representation of the ASCII bytes of the string.
                return binascii.b2a_hex(self.encode('ascii')).decode("ascii")
            def __repr__(self):
                return self + " repr"
        o = octetstring('A')
        self.assertEqual(type(o), octetstring)
        self.assertEqual(type(str(o)), str)
        self.assertEqual(type(repr(o)), str)
        self.assertEqual(ord(o), 0x41)
        self.assertEqual(str(o), '41')
        self.assertEqual(repr(o), 'A repr')
        self.assertEqual(o.__str__(), '41')
        self.assertEqual(o.__repr__(), 'A repr')
        capture = io.StringIO()
        # Calling str() or not exercises different internal paths.
        print(o, file=capture)
        print(str(o), file=capture)
        self.assertEqual(capture.getvalue(), '41\n41\n')
        capture.close()
    def test_keyword_arguments(self):
        """Keyword arguments reach __call__ and __init__.

        NOTE(review): ``list.__init__(sequence=...)`` is accepted here;
        CPython 3 rejects keyword arguments to list() — presumably this
        test targets an implementation that allows it; confirm.
        """
        # Testing keyword arguments to __init__, __call__...
        def f(a): return a
        self.assertEqual(f.__call__(a=42), 42)
        a = []
        list.__init__(a, sequence=[0, 1, 2])
        self.assertEqual(a, [0, 1, 2])
    def test_recursive_call(self):
        """Setting __call__ to an instance of the same class makes calling
        recurse forever; the recursion limit must stop it, not crash."""
        # Testing recursive __call__() by setting to instance of class...
        class A(object):
            pass
        A.__call__ = A()
        try:
            A()()
        except RuntimeError:
            pass
        else:
            self.fail("Recursion limit should have been reached for __call__()")
    def test_delete_hook(self):
        """__del__ runs on collection, and del obj[i] on a class without
        __delitem__ raises TypeError."""
        # Testing __del__ hook...
        log = []
        class C(object):
            def __del__(self):
                log.append(1)
        c = C()
        self.assertEqual(log, [])
        del c
        support.gc_collect()
        self.assertEqual(log, [1])
        class D(object): pass
        d = D()
        try: del d[0]
        except TypeError: pass
        else: self.fail("invalid del() didn't raise TypeError")
def test_hash_inheritance(self):
# Testing hash of mutable subclasses...
class mydict(dict):
pass
d = mydict()
try:
hash(d)
except TypeError:
pass
else:
self.fail("hash() of dict subclass should fail")
class mylist(list):
pass
d = mylist()
try:
hash(d)
except TypeError:
pass
else:
self.fail("hash() of list subclass should fail")
    def test_str_operations(self):
        """Invalid str operations raise the expected exception types, and
        a few valid operations produce the expected values."""
        try: 'a' + 5
        except TypeError: pass
        else: self.fail("'' + 5 doesn't raise TypeError")
        try: ''.split('')
        except ValueError: pass
        else: self.fail("''.split('') doesn't raise ValueError")
        try: ''.join([0])
        except TypeError: pass
        else: self.fail("''.join([0]) doesn't raise TypeError")
        try: ''.rindex('5')
        except ValueError: pass
        else: self.fail("''.rindex('5') doesn't raise ValueError")
        try: '%(n)s' % None
        except TypeError: pass
        else: self.fail("'%(n)s' % None doesn't raise TypeError")
        try: '%(n' % {}
        except ValueError: pass
        else: self.fail("'%(n' % {} '' doesn't raise ValueError")
        try: '%*s' % ('abc')
        except TypeError: pass
        else: self.fail("'%*s' % ('abc') doesn't raise TypeError")
        try: '%*.*s' % ('abc', 5)
        except TypeError: pass
        else: self.fail("'%*.*s' % ('abc', 5) doesn't raise TypeError")
        try: '%s' % (1, 2)
        except TypeError: pass
        else: self.fail("'%s' % (1, 2) doesn't raise TypeError")
        try: '%' % None
        except ValueError: pass
        else: self.fail("'%' % None doesn't raise ValueError")
        self.assertEqual('534253'.isdigit(), 1)
        self.assertEqual('534253x'.isdigit(), 0)
        self.assertEqual('%c' % 5, '\x05')
        self.assertEqual('%c' % '5', '5')
def test_deepcopy_recursive(self):
# Testing deepcopy of recursive objects...
class Node:
pass
a = Node()
b = Node()
a.b = b
b.a = a
z = deepcopy(a) # This blew up before
    def test_unintialized_modules(self):
        """A module created via ModuleType.__new__ (skipping __init__) has
        no name/file attributes, an empty-ish __dict__, and still accepts
        attribute assignment."""
        # Testing uninitialized module objects...
        from types import ModuleType as M
        m = M.__new__(M)
        str(m)
        self.assertEqual(hasattr(m, "__name__"), 0)
        self.assertEqual(hasattr(m, "__file__"), 0)
        self.assertEqual(hasattr(m, "foo"), 0)
        self.assertFalse(m.__dict__)   # None or {} are both reasonable answers
        m.foo = 1
        self.assertEqual(m.__dict__, {"foo": 1})
    def test_funny_new(self):
        """__new__ may return a non-instance (then __init__ is skipped) or
        an instance of a different class (then that class's __init__ runs)."""
        # Testing __new__ returning something unexpected...
        class C(object):
            def __new__(cls, arg):
                if isinstance(arg, str): return [1, 2, 3]
                elif isinstance(arg, int): return object.__new__(D)
                else: return object.__new__(cls)
        class D(C):
            def __init__(self, arg):
                self.foo = arg
        self.assertEqual(C("1"), [1, 2, 3])
        self.assertEqual(D("1"), [1, 2, 3])
        d = D(None)
        self.assertEqual(d.foo, None)
        d = C(1)
        self.assertIsInstance(d, D)
        self.assertEqual(d.foo, 1)
        d = D(1)
        self.assertIsInstance(d, D)
        self.assertEqual(d.foo, 1)
    def test_imul_bug(self):
        """x *= y must call __imul__ for every RHS type, including floats,
        big ints, None and strings (SF 544647)."""
        # Testing for __imul__ problems...
        # SF bug 544647
        class C(object):
            def __imul__(self, other):
                # Return a tuple so the test can see what *= received.
                return (self, other)
        x = C()
        y = x
        y *= 1.0
        self.assertEqual(y, (x, 1.0))
        y = x
        y *= 2
        self.assertEqual(y, (x, 2))
        y = x
        y *= 3
        self.assertEqual(y, (x, 3))
        y = x
        y *= 1<<100
        self.assertEqual(y, (x, 1<<100))
        y = x
        y *= None
        self.assertEqual(y, (x, None))
        y = x
        y *= "foo"
        self.assertEqual(y, (x, "foo"))
    def test_copy_setstate(self):
        """copy.copy() and copy.deepcopy() must route state through
        __getstate__/__setstate__ rather than copying __dict__ directly."""
        # Testing that copy.*copy() correctly uses __setstate__...
        import copy
        class C(object):
            def __init__(self, foo=None):
                self.foo = foo
                self.__foo = foo
            def setfoo(self, foo=None):
                self.foo = foo
            def getfoo(self):
                return self.__foo
            def __getstate__(self):
                return [self.foo]
            def __setstate__(self_, lst):
                # Note: first parameter is self_ so that the enclosing
                # TestCase's self remains visible for the assertion.
                self.assertEqual(len(lst), 1)
                self_.__foo = self_.foo = lst[0]
        a = C(42)
        a.setfoo(24)
        self.assertEqual(a.foo, 24)
        self.assertEqual(a.getfoo(), 42)
        b = copy.copy(a)
        self.assertEqual(b.foo, 24)
        self.assertEqual(b.getfoo(), 24)
        b = copy.deepcopy(a)
        self.assertEqual(b.foo, 24)
        self.assertEqual(b.getfoo(), 24)
    def test_slices(self):
        """Slice syntax, explicit slice objects, and direct __getitem__ /
        __setitem__ calls agree for str, tuple and list, including
        subclasses that override __getitem__."""
        # Testing cases with slices and overridden __getitem__ ...
        # Strings
        self.assertEqual("hello"[:4], "hell")
        self.assertEqual("hello"[slice(4)], "hell")
        self.assertEqual(str.__getitem__("hello", slice(4)), "hell")
        class S(str):
            def __getitem__(self, x):
                return str.__getitem__(self, x)
        self.assertEqual(S("hello")[:4], "hell")
        self.assertEqual(S("hello")[slice(4)], "hell")
        self.assertEqual(S("hello").__getitem__(slice(4)), "hell")
        # Tuples
        self.assertEqual((1,2,3)[:2], (1,2))
        self.assertEqual((1,2,3)[slice(2)], (1,2))
        self.assertEqual(tuple.__getitem__((1,2,3), slice(2)), (1,2))
        class T(tuple):
            def __getitem__(self, x):
                return tuple.__getitem__(self, x)
        self.assertEqual(T((1,2,3))[:2], (1,2))
        self.assertEqual(T((1,2,3))[slice(2)], (1,2))
        self.assertEqual(T((1,2,3)).__getitem__(slice(2)), (1,2))
        # Lists
        self.assertEqual([1,2,3][:2], [1,2])
        self.assertEqual([1,2,3][slice(2)], [1,2])
        self.assertEqual(list.__getitem__([1,2,3], slice(2)), [1,2])
        class L(list):
            def __getitem__(self, x):
                return list.__getitem__(self, x)
        self.assertEqual(L([1,2,3])[:2], [1,2])
        self.assertEqual(L([1,2,3])[slice(2)], [1,2])
        self.assertEqual(L([1,2,3]).__getitem__(slice(2)), [1,2])
        # Now do lists and __setitem__
        a = L([1,2,3])
        a[slice(1, 3)] = [3,2]
        self.assertEqual(a, [1,3,2])
        a[slice(0, 2, 1)] = [3,1]
        self.assertEqual(a, [3,1,2])
        a.__setitem__(slice(1, 3), [2,1])
        self.assertEqual(a, [3,2,1])
        a.__setitem__(slice(0, 2, 1), [2,3])
        self.assertEqual(a, [2,3,1])
    def test_subtype_resurrection(self):
        """A __del__ that resurrects the instance (by storing it in a class
        attribute) must not crash the finalizer machinery."""
        # Testing resurrection of new-style instance...
        class C(object):
            container = []
            def __del__(self):
                # resurrect the instance
                C.container.append(self)
        c = C()
        c.attr = 42
        # The most interesting thing here is whether this blows up, due to
        # flawed GC tracking logic in typeobject.c's call_finalizer() (a 2.2.1
        # bug).
        del c
        # If that didn't blow up, it's also interesting to see whether clearing
        # the last container slot works: that will attempt to delete c again,
        # which will cause c to get appended back to the container again
        # "during" the del.  (On non-CPython implementations, however, __del__
        # is typically not called again.)
        support.gc_collect()
        self.assertEqual(len(C.container), 1)
        del C.container[-1]
        if support.check_impl_detail():
            support.gc_collect()
            self.assertEqual(len(C.container), 1)
            self.assertEqual(C.container[-1].attr, 42)
        # Make c mortal again, so that the test framework with -l doesn't report
        # it as a leak.
        del C.__del__
def test_slots_trash(self):
# Testing slot trash...
# Deallocating deeply nested slotted trash caused stack overflows
class trash(object):
__slots__ = ['x']
def __init__(self, x):
self.x = x
o = None
for i in range(50000):
o = trash(o)
del o
    def test_slots_multiple_inheritance(self):
        """Multiple inheritance mixing an empty-slots base with a normal
        base must not crash (SF 575229) and keeps __dict__/__weakref__."""
        # SF bug 575229, multiple inheritance w/ slots dumps core
        class A(object):
            __slots__=()
        class B(object):
            pass
        class C(A,B) :
            __slots__=()
        if support.check_impl_detail():
            self.assertEqual(C.__basicsize__, B.__basicsize__)
        self.assertTrue(hasattr(C, '__dict__'))
        self.assertTrue(hasattr(C, '__weakref__'))
        C().x = 2
def test_rmul(self):
# Testing correct invocation of __rmul__...
# SF patch 592646
class C(object):
def __mul__(self, other):
return "mul"
def __rmul__(self, other):
return "rmul"
a = C()
self.assertEqual(a*2, "mul")
self.assertEqual(a*2.2, "mul")
self.assertEqual(2*a, "rmul")
self.assertEqual(2.2*a, "rmul")
def test_ipow(self):
# Testing correct invocation of __ipow__...
# [SF bug 620179]
class C(object):
def __ipow__(self, other):
pass
a = C()
a **= 2
    def test_mutable_bases(self):
        """Reassigning a class's __bases__.

        Legal replacements retarget method/attribute lookup for the class
        and its existing subclasses; deletion, empty tuples, cycles and
        duplicate bases must all be rejected.
        """
        # Testing mutable bases...
        # stuff that should work:
        class C(object):
            pass
        class C2(object):
            def __getattribute__(self, attr):
                if attr == 'a':
                    return 2
                else:
                    return super(C2, self).__getattribute__(attr)
            def meth(self):
                return 1
        class D(C):
            pass
        class E(D):
            pass
        d = D()
        e = E()
        D.__bases__ = (C,)
        D.__bases__ = (C2,)
        self.assertEqual(d.meth(), 1)
        self.assertEqual(e.meth(), 1)
        self.assertEqual(d.a, 2)
        self.assertEqual(e.a, 2)
        self.assertEqual(C2.__subclasses__(), [D])
        try:
            del D.__bases__
        except (TypeError, AttributeError):
            pass
        else:
            self.fail("shouldn't be able to delete .__bases__")
        try:
            D.__bases__ = ()
        except TypeError as msg:
            if str(msg) == "a new-style class can't have only classic bases":
                self.fail("wrong error message for .__bases__ = ()")
        else:
            self.fail("shouldn't be able to set .__bases__ to ()")
        try:
            D.__bases__ = (D,)
        except TypeError:
            pass
        else:
            # actually, we'll have crashed by here...
            self.fail("shouldn't be able to create inheritance cycles")
        try:
            D.__bases__ = (C, C)
        except TypeError:
            pass
        else:
            self.fail("didn't detect repeated base classes")
        try:
            D.__bases__ = (E,)
        except TypeError:
            pass
        else:
            self.fail("shouldn't be able to create inheritance cycles")
    def test_builtin_bases(self):
        """Querying __bases__ on every builtin type must not crash (issue
        #5787), and __bases__ assignments that would change the underlying
        C layout must be rejected."""
        # Make sure all the builtin types can have their base queried without
        # segfaulting. See issue #5787.
        builtin_types = [tp for tp in builtins.__dict__.values()
                         if isinstance(tp, type)]
        for tp in builtin_types:
            object.__getattribute__(tp, "__bases__")
            if tp is not object:
                self.assertEqual(len(tp.__bases__), 1, tp)
        class L(list):
            pass
        class C(object):
            pass
        class D(C):
            pass
        try:
            L.__bases__ = (dict,)
        except TypeError:
            pass
        else:
            self.fail("shouldn't turn list subclass into dict subclass")
        try:
            list.__bases__ = (dict,)
        except TypeError:
            pass
        else:
            self.fail("shouldn't be able to assign to list.__bases__")
        try:
            D.__bases__ = (C, list)
        except TypeError:
            pass
        else:
            assert 0, "best_base calculation found wanting"
    def test_mutable_bases_with_failing_mro(self):
        """If recomputing a subclass's MRO fails during a __bases__
        reassignment, already-adjusted sibling MROs must be rolled back."""
        # Testing mutable bases with failing mro...
        class WorkOnce(type):
            def __new__(self, name, bases, ns):
                self.flag = 0
                return super(WorkOnce, self).__new__(WorkOnce, name, bases, ns)
            def mro(self):
                # Succeed the first time (class creation), fail afterwards
                # (i.e. when __bases__ reassignment triggers recomputation).
                if self.flag > 0:
                    raise RuntimeError("bozo")
                else:
                    self.flag += 1
                    return type.mro(self)
        class WorkAlways(type):
            def mro(self):
                # this is here to make sure that .mro()s aren't called
                # with an exception set (which was possible at one point).
                # An error message will be printed in a debug build.
                # What's a good way to test for this?
                return type.mro(self)
        class C(object):
            pass
        class C2(object):
            pass
        class D(C):
            pass
        class E(D):
            pass
        class F(D, metaclass=WorkOnce):
            pass
        class G(D, metaclass=WorkAlways):
            pass
        # Immediate subclasses have their mro's adjusted in alphabetical
        # order, so E's will get adjusted before adjusting F's fails.  We
        # check here that E's gets restored.
        E_mro_before = E.__mro__
        D_mro_before = D.__mro__
        try:
            D.__bases__ = (C2,)
        except RuntimeError:
            self.assertEqual(E.__mro__, E_mro_before)
            self.assertEqual(D.__mro__, D_mro_before)
        else:
            self.fail("exception not propagated")
    def test_mutable_bases_catch_mro_conflict(self):
        """A __bases__ reassignment that would make the MRO inconsistent
        with an existing subclass (E inherits C before D) must raise."""
        # Testing mutable bases catch mro conflict...
        class A(object):
            pass
        class B(object):
            pass
        class C(A, B):
            pass
        class D(A, B):
            pass
        class E(C, D):
            pass
        try:
            C.__bases__ = (B, A)
        except TypeError:
            pass
        else:
            self.fail("didn't catch MRO conflict")
    def test_mutable_names(self):
        """__name__ is writable on a heap class (even to a dotted string)
        and does not affect __module__."""
        # Testing mutable names...
        class C(object):
            pass
        # C.__module__ could be 'test_descr' or '__main__'
        mod = C.__module__
        C.__name__ = 'D'
        self.assertEqual((C.__module__, C.__name__), (mod, 'D'))
        C.__name__ = 'D.E'
        self.assertEqual((C.__module__, C.__name__), (mod, 'D.E'))
    def test_evil_type_name(self):
        """Assigning over a __name__ whose old value has a __del__ that
        itself reassigns __name__ must not corrupt the type (bug #16447)."""
        # A badly placed Py_DECREF in type_set_name led to arbitrary code
        # execution while the type structure was not in a sane state, and a
        # possible segmentation fault as a result. See bug #16447.
        class Nasty(str):
            def __del__(self):
                C.__name__ = "other"
        class C:
            pass
        C.__name__ = Nasty("abc")
        C.__name__ = "normal"
    def test_subclass_right_op(self):
        """A subclass's reflected operator (__r<op>__) must be tried before
        the base class's forward operator when the subclass instance is the
        right operand."""
        # Testing correct dispatch of subclass overloading __r<op>__...
        # This code tests various cases where right-dispatch of a subclass
        # should be preferred over left-dispatch of a base class.
        # Case 1: subclass of int; this tests code in abstract.c::binary_op1()
        class B(int):
            def __floordiv__(self, other):
                return "B.__floordiv__"
            def __rfloordiv__(self, other):
                return "B.__rfloordiv__"
        self.assertEqual(B(1) // 1, "B.__floordiv__")
        self.assertEqual(1 // B(1), "B.__rfloordiv__")
        # Case 2: subclass of object; this is just the baseline for case 3
        class C(object):
            def __floordiv__(self, other):
                return "C.__floordiv__"
            def __rfloordiv__(self, other):
                return "C.__rfloordiv__"
        self.assertEqual(C() // 1, "C.__floordiv__")
        self.assertEqual(1 // C(), "C.__rfloordiv__")
        # Case 3: subclass of new-style class; here it gets interesting
        class D(C):
            def __floordiv__(self, other):
                return "D.__floordiv__"
            def __rfloordiv__(self, other):
                return "D.__rfloordiv__"
        self.assertEqual(D() // C(), "D.__floordiv__")
        self.assertEqual(C() // D(), "D.__rfloordiv__")
        # Case 4: this didn't work right in 2.2.2 and 2.3a1
        class E(C):
            pass
        self.assertEqual(E.__rfloordiv__, C.__rfloordiv__)
        self.assertEqual(E() // 1, "C.__floordiv__")
        self.assertEqual(1 // E(), "C.__rfloordiv__")
        self.assertEqual(E() // C(), "C.__floordiv__")
        self.assertEqual(C() // E(), "C.__floordiv__") # This one would fail
    @support.impl_detail("testing an internal kind of method object")
    def test_meth_class_get(self):
        """__get__ on a METH_CLASS descriptor (dict.fromkeys): valid
        owner/instance combinations work; wrong argument types raise."""
        # Testing __get__ method of METH_CLASS C methods...
        # Full coverage of descrobject.c::classmethod_get()
        # Baseline
        arg = [1, 2, 3]
        res = {1: None, 2: None, 3: None}
        self.assertEqual(dict.fromkeys(arg), res)
        self.assertEqual({}.fromkeys(arg), res)
        # Now get the descriptor
        descr = dict.__dict__["fromkeys"]
        # More baseline using the descriptor directly
        self.assertEqual(descr.__get__(None, dict)(arg), res)
        self.assertEqual(descr.__get__({})(arg), res)
        # Now check various error cases
        try:
            descr.__get__(None, None)
        except TypeError:
            pass
        else:
            self.fail("shouldn't have allowed descr.__get__(None, None)")
        try:
            descr.__get__(42)
        except TypeError:
            pass
        else:
            self.fail("shouldn't have allowed descr.__get__(42)")
        try:
            descr.__get__(None, 42)
        except TypeError:
            pass
        else:
            self.fail("shouldn't have allowed descr.__get__(None, 42)")
        try:
            descr.__get__(None, int)
        except TypeError:
            pass
        else:
            self.fail("shouldn't have allowed descr.__get__(None, int)")
    def test_isinst_isclass(self):
        """isinstance() on a proxy that forwards __getattribute__ (and thus
        __class__) must report the proxied object's class, for classic and
        new-style classes and their subclasses."""
        # Testing proxy isinstance() and isclass()...
        class Proxy(object):
            def __init__(self, obj):
                self.__obj = obj
            def __getattribute__(self, name):
                # Only the name-mangled attribute is served locally;
                # everything else is forwarded to the wrapped object.
                if name.startswith("_Proxy__"):
                    return object.__getattribute__(self, name)
                else:
                    return getattr(self.__obj, name)
        # Test with a classic class
        class C:
            pass
        a = C()
        pa = Proxy(a)
        self.assertIsInstance(a, C)  # Baseline
        self.assertIsInstance(pa, C) # Test
        # Test with a classic subclass
        class D(C):
            pass
        a = D()
        pa = Proxy(a)
        self.assertIsInstance(a, C)  # Baseline
        self.assertIsInstance(pa, C) # Test
        # Test with a new-style class
        class C(object):
            pass
        a = C()
        pa = Proxy(a)
        self.assertIsInstance(a, C)  # Baseline
        self.assertIsInstance(pa, C) # Test
        # Test with a new-style subclass
        class D(C):
            pass
        a = D()
        pa = Proxy(a)
        self.assertIsInstance(a, C)  # Baseline
        self.assertIsInstance(pa, C) # Test
    def test_proxy_super(self):
        """super() inside a method must work when the method is invoked
        through the class dict on a proxy object that forwards attributes."""
        # Testing super() for a proxy object...
        class Proxy(object):
            def __init__(self, obj):
                self.__obj = obj
            def __getattribute__(self, name):
                # Serve only name-mangled attributes locally; forward the rest.
                if name.startswith("_Proxy__"):
                    return object.__getattribute__(self, name)
                else:
                    return getattr(self.__obj, name)
        class B(object):
            def f(self):
                return "B.f"
        class C(B):
            def f(self):
                return super(C, self).f() + "->C.f"
        obj = C()
        p = Proxy(obj)
        self.assertEqual(C.__dict__["f"](p), "B.f->C.f")
    def test_carloverre(self):
        """object.__setattr__/__delattr__ must refuse to modify attributes
        of built-in immutable types (Carlo Verre's hack)."""
        # Testing prohibition of Carlo Verre's hack...
        try:
            object.__setattr__(str, "foo", 42)
        except TypeError:
            pass
        else:
            self.fail("Carlo Verre __setattr__ succeeded!")
        try:
            object.__delattr__(str, "lower")
        except TypeError:
            pass
        else:
            self.fail("Carlo Verre __delattr__ succeeded!")
def test_weakref_segfault(self):
# Testing weakref segfault...
# SF 742911
import weakref
class Provoker:
def __init__(self, referrent):
self.ref = weakref.ref(referrent)
def __del__(self):
x = self.ref()
class Oops(object):
pass
o = Oops()
o.whatever = Provoker(o)
del o
def test_wrapper_segfault(self):
# SF 927248: deeply nested wrappers could cause stack overflow
f = lambda:None
for i in range(1000000):
f = f.__call__
f = None
    def test_file_fault(self):
        """print() must cope with a sys.stdout whose attribute access
        raises (and swaps stdout back) without crashing."""
        # Testing sys.stdout is changed in getattr...
        test_stdout = sys.stdout
        class StdoutGuard:
            def __getattr__(self, attr):
                # Restore the real stdout, then fail the lookup.
                sys.stdout = sys.__stdout__
                raise RuntimeError("Premature access to sys.stdout.%s" % attr)
        sys.stdout = StdoutGuard()
        try:
            print("Oops!")
        except RuntimeError:
            pass
        finally:
            sys.stdout = test_stdout
    def test_vicious_descriptor_nonsense(self):
        """An instance-dict key whose __eq__ deletes the class attribute it
        collides with must not crash attribute lookup (2003-04-17
        python-dev report)."""
        # Testing vicious_descriptor_nonsense...
        # A potential segfault spotted by Thomas Wouters in mail to
        # python-dev 2003-04-17, turned into an example & fixed by Michael
        # Hudson just less than four months later...
        class Evil(object):
            def __hash__(self):
                # Collide with the string key 'attr' in the instance dict.
                return hash('attr')
            def __eq__(self, other):
                # Side effect during dict lookup: remove the class attribute.
                del C.attr
                return 0
        class Descr(object):
            def __get__(self, ob, type=None):
                return 1
        class C(object):
            attr = Descr()
        c = C()
        c.__dict__[Evil()] = 0
        self.assertEqual(c.attr, 1)
        # this makes a crash more likely:
        support.gc_collect()
        self.assertEqual(hasattr(c, 'attr'), False)
def test_init(self):
# SF 1155938
class Foo(object):
def __init__(self):
return 10
try:
Foo()
except TypeError:
pass
else:
self.fail("did not test __init__() for None return")
    def test_method_wrapper(self):
        """Reflection on method-wrapper objects: equality, __name__,
        __self__/__objclass__ (or im_self/im_class), __doc__, and
        (un)hashability tied to the bound object's hashability."""
        # Testing method-wrapper objects...
        # <type 'method-wrapper'> did not support any reflection before 2.5
        # XXX should methods really support __eq__?
        l = []
        self.assertEqual(l.__add__, l.__add__)
        self.assertEqual(l.__add__, [].__add__)
        self.assertTrue(l.__add__ != [5].__add__)
        self.assertTrue(l.__add__ != l.__mul__)
        self.assertTrue(l.__add__.__name__ == '__add__')
        if hasattr(l.__add__, '__self__'):
            # CPython
            self.assertTrue(l.__add__.__self__ is l)
            self.assertTrue(l.__add__.__objclass__ is list)
        else:
            # Python implementations where [].__add__ is a normal bound method
            self.assertTrue(l.__add__.im_self is l)
            self.assertTrue(l.__add__.im_class is list)
        self.assertEqual(l.__add__.__doc__, list.__add__.__doc__)
        try:
            # A wrapper bound to an unhashable object is itself unhashable.
            hash(l.__add__)
        except TypeError:
            pass
        else:
            self.fail("no TypeError from hash([].__add__)")
        t = ()
        t += (7,)
        self.assertEqual(t.__add__, (7,).__add__)
        self.assertEqual(hash(t.__add__), hash((7,).__add__))
    def test_not_implemented(self):
        """Every binary and in-place special method that returns
        NotImplemented must make the corresponding expression raise
        TypeError."""
        # Testing NotImplemented...
        # all binary methods should be able to return a NotImplemented
        import operator
        def specialmethod(self, other):
            return NotImplemented
        def check(expr, x, y):
            # Evaluate expr with the given operands and require a TypeError.
            try:
                exec(expr, {'x': x, 'y': y, 'operator': operator})
            except TypeError:
                pass
            else:
                self.fail("no TypeError from %r" % (expr,))
        N1 = sys.maxsize + 1    # might trigger OverflowErrors instead of
                                # TypeErrors
        N2 = sys.maxsize        # if sizeof(int) < sizeof(long), might trigger
                                #   ValueErrors instead of TypeErrors
        for name, expr, iexpr in [
                ('__add__',      'x + y',                   'x += y'),
                ('__sub__',      'x - y',                   'x -= y'),
                ('__mul__',      'x * y',                   'x *= y'),
                ('__truediv__',  'operator.truediv(x, y)',  None),
                ('__floordiv__', 'operator.floordiv(x, y)', None),
                ('__div__',      'x / y',                   'x /= y'),
                ('__mod__',      'x % y',                   'x %= y'),
                ('__divmod__',   'divmod(x, y)',            None),
                ('__pow__',      'x ** y',                  'x **= y'),
                ('__lshift__',   'x << y',                  'x <<= y'),
                ('__rshift__',   'x >> y',                  'x >>= y'),
                ('__and__',      'x & y',                   'x &= y'),
                ('__or__',       'x | y',                   'x |= y'),
                ('__xor__',      'x ^ y',                   'x ^= y')]:
            rname = '__r' + name[2:]
            A = type('A', (), {name: specialmethod})
            a = A()
            check(expr, a, a)
            check(expr, a, N1)
            check(expr, a, N2)
            if iexpr:
                check(iexpr, a, a)
                check(iexpr, a, N1)
                check(iexpr, a, N2)
                iname = '__i' + name[2:]
                C = type('C', (), {iname: specialmethod})
                c = C()
                check(iexpr, c, a)
                check(iexpr, c, N1)
                check(iexpr, c, N2)
def test_assign_slice(self):
    # Regression test: ceval.c's assign_slice once consulted
    # tp->tp_as_sequence->sq_slice instead of sq_ass_slice, which broke
    # slice assignment through a pure __setitem__ implementation.
    class Recorder(object):
        def __setitem__(self, key, item):
            self.value = item
    target = Recorder()
    target[1:2] = 3
    self.assertEqual(target.value, 3)
def test_set_and_no_get(self):
    # See
    # http://mail.python.org/pipermail/python-dev/2010-January/095637.html
    # A descriptor that defines __set__ but not __get__: attribute *reads*
    # must fall through to the normal lookup (the descriptor itself from
    # the class, or the instance __dict__ entry once one exists).
    class Descr(object):
        def __init__(self, name):
            self.name = name
        def __set__(self, obj, value):
            obj.__dict__[self.name] = value
    descr = Descr("a")
    class X(object):
        a = descr
    x = X()
    # No __get__, so reading through the instance yields the descriptor.
    self.assertIs(x.a, descr)
    x.a = 42
    self.assertEqual(x.a, 42)
    # Also check type_getattro for correctness.
    class Meta(type):
        pass
    # Fixed: the original assigned the Python 2 ``__metaclass__`` class
    # attribute, which Python 3 ignores, so the metaclass lookup path was
    # never actually exercised.  Use the Python 3 keyword syntax.
    class X(object, metaclass=Meta):
        pass
    X.a = 42
    Meta.a = Descr("a")
    # The set-only descriptor on the metaclass must not shadow the value
    # stored in the class dict.
    self.assertEqual(X.a, 42)
def test_getattr_hooks(self):
    # issue 4230: interaction of __getattribute__ and __getattr__ hooks
    # when both are class-level descriptors that raise AttributeError.
    class Descriptor(object):
        counter = 0
        def __get__(self, obj, objtype=None):
            def getter(name):
                self.counter += 1
                raise AttributeError(name)
            return getter
    descr = Descriptor()
    class A(object):
        __getattribute__ = descr
    class B(object):
        __getattr__ = descr
    class C(object):
        __getattribute__ = descr
        __getattr__ = descr
    # A: only __getattribute__ fires -> one getter call.
    self.assertRaises(AttributeError, getattr, A(), "attr")
    self.assertEqual(descr.counter, 1)
    # B: default lookup fails, then __getattr__ fires -> one call.
    self.assertRaises(AttributeError, getattr, B(), "attr")
    self.assertEqual(descr.counter, 2)
    # C: __getattribute__ raises AttributeError, which in turn triggers
    # __getattr__ -> two calls total.
    self.assertRaises(AttributeError, getattr, C(), "attr")
    self.assertEqual(descr.counter, 4)
    class EvilGetattribute(object):
        # This used to segfault
        def __getattr__(self, name):
            raise AttributeError(name)
        def __getattribute__(self, name):
            # Delete the fallback hook mid-lookup and force collection;
            # the interpreter must cope with the method object going
            # away while it is being dispatched.
            del EvilGetattribute.__getattr__
            for i in range(5):
                gc.collect()
            raise AttributeError(name)
    self.assertRaises(AttributeError, getattr, EvilGetattribute(), "attr")
def test_type___getattribute__(self):
    # Passing a non-string name (here the ``type`` object itself) to
    # type.__getattribute__ must raise TypeError.
    with self.assertRaises(TypeError):
        type.__getattribute__(list, type)
def test_abstractmethods(self):
    # ``type`` itself (and any subclass of it) pretends not to expose
    # __abstractmethods__, and deleting the attribute from an ordinary
    # class that never had it must fail as well.
    for klass in (type, type('meta', (type,), {})):
        with self.assertRaises(AttributeError):
            getattr(klass, "__abstractmethods__")
    X = type('X', (object,), {})
    self.assertRaises(AttributeError, delattr, X, "__abstractmethods__")
def test_proxy_call(self):
    # An instance that merely *claims* ``__class__ = str`` fools
    # isinstance() but must still be rejected by str's descriptors.
    class FakeStr:
        __class__ = str
    impostor = FakeStr()
    # isinstance() reads __class__, so the lie works here...
    self.assertTrue(isinstance(impostor, str))
    # ...but a method descriptor type-checks the real object...
    self.assertRaises(TypeError, str.split, impostor)
    # ...and so does a slot wrapper descriptor.
    self.assertRaises(TypeError, str.__add__, impostor, "abc")
def test_repr_as_str(self):
    # Issue #11603: rebinding __str__ as __repr__ used to crash or loop
    # forever, since object.__str__ delegates to __repr__.
    class Looper:
        pass
    Looper.__repr__ = Looper.__str__
    instance = Looper()
    with self.assertRaises(RuntimeError):
        str(instance)
    with self.assertRaises(RuntimeError):
        repr(instance)
def test_mixing_slot_wrappers(self):
    # A slot wrapper taken from one slot (dict item assignment) may be
    # installed under another (attribute assignment) and must still work.
    class AttrDict(dict):
        __setattr__ = dict.__setitem__
    mapping = AttrDict()
    mapping.y = 42
    self.assertEqual(mapping["y"], 42)
def test_slot_shadows_class_variable(self):
    # Declaring a name both in __slots__ and as a class variable is a
    # contradiction and must be rejected at class-creation time.
    with self.assertRaises(ValueError) as caught:
        class X:
            __slots__ = ["foo"]
            foo = None
    self.assertEqual(str(caught.exception),
                     "'foo' in __slots__ conflicts with class variable")
def test_set_doc(self):
    # __doc__ is writable on a heap type but read-only on a static type,
    # and the getset descriptor must report that with a useful message.
    class X:
        "elephant"
    X.__doc__ = "banana"
    self.assertEqual(X.__doc__, "banana")
    doc_descr = type(list).__dict__["__doc__"]
    with self.assertRaises(TypeError) as caught:
        doc_descr.__set__(list, "blah")
    self.assertIn("can't set list.__doc__", str(caught.exception))
    with self.assertRaises(TypeError) as caught:
        type(X).__dict__["__doc__"].__delete__(X)
    self.assertIn("can't delete X.__doc__", str(caught.exception))
    # The failed delete must not have clobbered the value.
    self.assertEqual(X.__doc__, "banana")
def test_qualname(self):
    # __qualname__ on the four flavours of C-level descriptor objects.
    descriptors = [str.lower, complex.real, float.real, int.__add__]
    types = ['method', 'member', 'getset', 'wrapper']
    # make sure we have an example of each type of descriptor
    for d, n in zip(descriptors, types):
        self.assertEqual(type(d).__name__, n + '_descriptor')
    # Each descriptor's qualname is "<owning class qualname>.<name>".
    for d in descriptors:
        qualname = d.__objclass__.__qualname__ + '.' + d.__name__
        self.assertEqual(d.__qualname__, qualname)
    self.assertEqual(str.lower.__qualname__, 'str.lower')
    self.assertEqual(complex.real.__qualname__, 'complex.real')
    self.assertEqual(float.real.__qualname__, 'float.real')
    self.assertEqual(int.__add__.__qualname__, 'int.__add__')
    # __qualname__ cannot be deleted from a heap type nor set on a
    # static (built-in) type.
    class X:
        pass
    with self.assertRaises(TypeError):
        del X.__qualname__
    self.assertRaises(TypeError, type.__dict__['__qualname__'].__set__,
                      str, 'Oink')
def test_qualname_dict(self):
    # A '__qualname__' entry in the namespace passed to type() is
    # consumed by class creation rather than stored in the class dict,
    # and the caller's dict is left untouched.
    namespace = {'__qualname__': 'some.name'}
    cls = type('Foo', (), namespace)
    self.assertEqual(cls.__qualname__, 'some.name')
    self.assertNotIn('__qualname__', cls.__dict__)
    self.assertEqual(namespace, {'__qualname__': 'some.name'})
    # A non-string __qualname__ is rejected outright.
    with self.assertRaises(TypeError):
        type('Foo', (), {'__qualname__': 1})
def test_cycle_through_dict(self):
    # See bug #1469629: an object that serves as its own __dict__ forms
    # a reference cycle that the garbage collector must be able to break.
    class X(dict):
        def __init__(self):
            dict.__init__(self)
            # Self-referential: the instance is its own attribute dict.
            self.__dict__ = self
    x = X()
    x.attr = 42
    wr = weakref.ref(x)
    del x
    support.gc_collect()
    # The cycle must have been collected...
    self.assertIsNone(wr())
    # ...and no X instance may linger anywhere the GC can see.
    for o in gc.get_objects():
        self.assertIsNot(type(o), X)
def test_object_new_and_init_with_parameters(self):
    # See issue #1683368: object.__new__/__init__ reject excess
    # arguments unless the excess is absorbed by an override.
    class OverrideNeither:
        pass
    # With neither method overridden, any argument is an error.
    self.assertRaises(TypeError, OverrideNeither, 1)
    self.assertRaises(TypeError, OverrideNeither, kw=1)
    class OverrideNew:
        def __new__(cls, foo, kw=0, *args, **kwds):
            # Forwarding to object.__new__ is fine while *args/**kwds
            # are empty.
            return object.__new__(cls, *args, **kwds)
    class OverrideInit:
        def __init__(self, foo, kw=0, *args, **kwargs):
            return object.__init__(self, *args, **kwargs)
    class OverrideBoth(OverrideNew, OverrideInit):
        pass
    for case in OverrideNew, OverrideInit, OverrideBoth:
        # Arguments consumed by the override are accepted...
        case(1)
        case(1, kw=2)
        # ...but surplus ones reach object.__new__/__init__ and raise.
        self.assertRaises(TypeError, case, 1, 2, 3)
        self.assertRaises(TypeError, case, 1, 2, foo=3)
class DictProxyTests(unittest.TestCase):
    """Behaviour of the mappingproxy returned by a class ``__dict__``."""

    def setUp(self):
        class C(object):
            def meth(self):
                pass
        self.C = C

    @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
                     'trace function introduces __local__')
    def test_iter_keys(self):
        # Testing dict-proxy keys...
        view = self.C.__dict__.keys()
        self.assertNotIsInstance(view, list)
        self.assertEqual(sorted(view),
                         ['__dict__', '__doc__', '__module__',
                          '__weakref__', 'meth'])

    @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
                     'trace function introduces __local__')
    def test_iter_values(self):
        # Testing dict-proxy values...
        view = self.C.__dict__.values()
        self.assertNotIsInstance(view, list)
        self.assertEqual(len(list(view)), 5)

    @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
                     'trace function introduces __local__')
    def test_iter_items(self):
        # Testing dict-proxy iteritems...
        view = self.C.__dict__.items()
        self.assertNotIsInstance(view, list)
        self.assertEqual(sorted(pair[0] for pair in view),
                         ['__dict__', '__doc__', '__module__',
                          '__weakref__', 'meth'])

    def test_dict_type_with_metaclass(self):
        # Testing type of __dict__ when metaclass set...
        class B(object):
            pass
        class M(type):
            pass
        class C(metaclass=M):
            # In 2.3a1, C.__dict__ was a real dict rather than a dict proxy
            pass
        self.assertEqual(type(C.__dict__), type(B.__dict__))

    def test_repr(self):
        # Testing mappingproxy.__repr__.  Key order is arbitrary, so only
        # check the envelope and that each item appears somewhere inside.
        text = repr(self.C.__dict__)
        self.assertTrue(text.startswith('mappingproxy('), text)
        self.assertTrue(text.endswith(')'), text)
        for key, value in self.C.__dict__.items():
            self.assertIn('{!r}: {!r}'.format(key, value), text)
class PTypesLongInitTest(unittest.TestCase):
    # This is in its own TestCase so that it can be run before any other tests.
    def test_pytype_long_ready(self):
        # Testing SF bug 551412 ...
        # Three-argument pow() with an object faking __pow__ used to dump
        # core -- but only when test_descr.py was run on its own, i.e.
        # before PyType_Ready() had been called for PyLong_Type.
        FakeLong = type('FakeLong', (object,),
                        {'__pow__': lambda self, *args: None})
        try:
            pow(0, FakeLong(), 0)
        except:
            pass
        # Another segfault only when run early
        # (before PyType_Ready(tuple) is called)
        type.mro(tuple)
class MiscTests(unittest.TestCase):
    def test_type_lookup_mro_reference(self):
        # Issue #14199: _PyType_Lookup() has to keep a strong reference to
        # the type MRO because it may be modified during the lookup, if
        # __bases__ is set during the lookup for example.
        class MyKey(object):
            # Collides with the string key 'mykey' so the dict probe has
            # to call __eq__ ...
            def __hash__(self):
                return hash('mykey')
            def __eq__(self, other):
                # ... which rebases X, replacing its MRO mid-lookup.
                X.__bases__ = (Base2,)
        class Base(object):
            mykey = 'from Base'
            mykey2 = 'from Base'
        class Base2(object):
            mykey = 'from Base2'
            mykey2 = 'from Base2'
        # X's own dict holds a MyKey() entry, so looking up the string
        # 'mykey' triggers MyKey.__eq__ during the class-dict probe.
        X = type('X', (Base,), {MyKey(): 5})
        # mykey is read from Base
        self.assertEqual(X.mykey, 'from Base')
        # mykey2 is read from Base2 because MyKey.__eq__ has set __bases__
        self.assertEqual(X.mykey2, 'from Base2')
def test_main():
    # Run all local test cases, with PTypesLongInitTest first.
    suite = (PTypesLongInitTest, OperatorsTest, ClassPropertiesAndMethods,
             DictProxyTests, MiscTests)
    support.run_unittest(*suite)
if __name__ == "__main__":
test_main()
| bsd-3-clause |
xuqingkuang/tsCloud | tsCloud/core/models/fields/files.py | 1 | 1657 | import os.path
import urllib
from django.db import models
from django.core.files import File
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
from tsCloud.core.storage import OverwriteStorage
class URLImageFieldFile(models.fields.files.ImageFieldFile):
    """ImageFieldFile that can populate itself by downloading a URL.

    The field value (``self.name``) may initially hold a URL;
    ``download()`` fetches the remote image and stores it through
    OverwriteStorage so repeated downloads reuse the same file name.
    (Python 2 code: uses ``urllib.urlretrieve`` and old except syntax.)
    """
    def __init__(self, instance, field, name):
        super(URLImageFieldFile, self).__init__(instance, field, name)
        self.url_validator = URLValidator()
        # Overwrite existing files instead of generating suffixed names.
        self.storage = OverwriteStorage()
    def download(self, url = None, name = None, overwrite=False):
        """Fetch ``url`` (default: self.name) and save it under ``name``.

        Does nothing when the target file already exists and
        ``overwrite`` is false, or when the URL cannot be retrieved.
        """
        if not overwrite and os.path.isfile(self.path):
            return
        if not name:
            name = self.generate_filename(url)
        image_file = self.retrive(url = url)
        if not image_file:
            return
        # NOTE(review): the handle from open() is never closed explicitly;
        # this relies on CPython refcounting to release it.
        self.save(name, File(open(image_file)))
    def generate_filename(self, filename = None):
        """Derive a safe local file name from a URL or path.

        Drops the query string and fragment, then keeps only
        alphanumerics and dots from the last path component.
        """
        if not filename:
            filename = self.name
        filename = filename.split('/')[-1].split('#')[0].split('?')[0]
        return "".join([c for c in filename if c.isalpha() or c.isdigit() or c=='.']).rstrip()
    def name_is_url(self):
        # True when the current field value validates as a URL.
        try:
            self.url_validator(self.name)
            return True
        except ValidationError, e:  # Python 2 syntax; 'e' is unused
            return False
    def retrive(self, url = None):
        """Download ``url`` (default: self.name) to a temporary file.

        Returns the local temporary path, or None when there is nothing
        to fetch.  (Note: the method name looks like a typo of
        "retrieve", kept as-is since callers use it.)
        """
        if not url and not self.name_is_url():
            return None
        if not url:
            url = self.name
        # urllib.urlretrieve returns (local_filename, headers).
        image_files = urllib.urlretrieve(url)
        if image_files:
            return image_files[0]
        return None
class URLImageField(models.ImageField):
    """ImageField whose file attribute can populate itself from a URL."""
    attr_class = URLImageFieldFile
| gpl-3.0 |
any1m1c/ipc20161 | jokenpo/equipe2/jokenpo_sheldon.py | 1 | 5023 | #
#
# Kylciane Cristiny Lopes Freitas - 1615310052
# Thiago Santos Borges - 1615310023
#
# Jokenpo "pedra, papel, tesoura, lagarto, spock" contra o computador.
import random

# Codigos das faces.
PEDRA, PAPEL, TESOURA, SPOCK, LAGARTO = 1, 2, 3, 4, 5

# (vencedor, perdedor) -> frase explicando a vitoria.  Substitui a
# cadeia original de 30 ramificacoes if/elif com as mesmas mensagens.
REGRAS = {
    (PAPEL, PEDRA): "Papel cobre pedra",
    (SPOCK, PEDRA): "Spock vaporiza a pedra",
    (PEDRA, TESOURA): "Pedra quebra tesoura",
    (PEDRA, LAGARTO): "Pedra esmaga lagarto",
    (TESOURA, PAPEL): "Tesoura corta papel",
    (LAGARTO, PAPEL): "Lagarto come papel",
    (PAPEL, SPOCK): "Papel refuta spock",
    (SPOCK, TESOURA): "Spock quebra tesoura",
    (TESOURA, LAGARTO): "Tesoura decapita lagarto",
    (LAGARTO, SPOCK): "Lagarto envenena spock",
}

print("Vamos brincar de Jokenpo?")
print("Digite seu nome")
nome = input()
print("1 - Pedra")
print("2 - Papel")
print("3 - Tesoura")
print("4 - Spock")
print("5 - Lagarto")
tentativa = int(input("Digite uma face\n"))
cond = True
while cond:
    # BUG corrigido: o original usava random.randrange(1, 4), que sorteia
    # apenas 1..3 -- o robo nunca jogava Spock (4) nem Lagarto (5).
    # randrange(1, 6) cobre as cinco faces (o limite superior e exclusivo).
    bot = random.randrange(1, 6)
    if tentativa == bot:
        print(nome+": ", tentativa)
        print("Robo : ", bot)
        print("Empate!!!")
    elif (tentativa, bot) in REGRAS:
        print(nome+": ", tentativa)
        print("Robo : ", bot)
        print("Vitoria!!!\n" + REGRAS[(tentativa, bot)])
    elif (bot, tentativa) in REGRAS:
        print(nome+": ", tentativa)
        print("Robo : ", bot)
        print("Derrota!!!\n" + REGRAS[(bot, tentativa)])
    # Faces fora de 1..5 nao produzem resultado, como no original.
    resp = str(input("Deseja jogar novamente??\n"))
    if resp == "n":
        cond = False
    else:
        cond = True
        tentativa = int(input("Digite uma face\n"))
| apache-2.0 |
2ndQuadrant/ansible | lib/ansible/modules/cloud/openstack/os_loadbalancer.py | 2 | 21100 | #!/usr/bin/python
# Copyright (c) 2018 Catalyst Cloud Ltd.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_loadbalancer
short_description: Add/Delete load balancer from OpenStack Cloud
extends_documentation_fragment: openstack
version_added: "2.7"
author: "Lingxian Kong (@lingxiankong)"
description:
- Add or Remove load balancer from the OpenStack load-balancer
service(Octavia). Load balancer update is not supported for now.
options:
name:
description:
- Name that has to be given to the load balancer
required: true
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
vip_network:
description:
- The name or id of the network for the virtual IP of the load balancer.
One of I(vip_network), I(vip_subnet), or I(vip_port) must be specified
for creation.
vip_subnet:
description:
- The name or id of the subnet for the virtual IP of the load balancer.
One of I(vip_network), I(vip_subnet), or I(vip_port) must be specified
for creation.
vip_port:
description:
- The name or id of the load balancer virtual IP port. One of
I(vip_network), I(vip_subnet), or I(vip_port) must be specified for
creation.
vip_address:
description:
- IP address of the load balancer virtual IP.
public_ip_address:
description:
- Public IP address associated with the VIP.
auto_public_ip:
description:
- Allocate a public IP address and associate with the VIP automatically.
type: bool
default: 'no'
public_network:
description:
- The name or ID of a Neutron external network.
delete_public_ip:
description:
- When C(state=absent) and this option is true, any public IP address
associated with the VIP will be deleted along with the load balancer.
type: bool
default: 'no'
listeners:
description:
- A list of listeners that attached to the load balancer.
suboptions:
name:
description:
- The listener name or ID.
protocol:
description:
- The protocol for the listener.
default: HTTP
protocol_port:
description:
- The protocol port number for the listener.
default: 80
pool:
description:
- The pool attached to the listener.
suboptions:
name:
description:
- The pool name or ID.
protocol:
description:
- The protocol for the pool.
default: HTTP
lb_algorithm:
description:
- The load balancing algorithm for the pool.
default: ROUND_ROBIN
members:
description:
- A list of members that added to the pool.
suboptions:
name:
description:
- The member name or ID.
address:
description:
- The IP address of the member.
protocol_port:
description:
- The protocol port number for the member.
default: 80
subnet:
description:
- The name or ID of the subnet the member service is
accessible from.
wait:
description:
- If the module should wait for the load balancer to be created or
deleted.
type: bool
default: 'yes'
timeout:
description:
- The amount of time the module should wait.
default: 180
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements: ["openstacksdk"]
'''
RETURN = '''
id:
description: The load balancer UUID.
returned: On success when C(state=present)
type: str
sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
loadbalancer:
description: Dictionary describing the load balancer.
returned: On success when C(state=present)
type: complex
contains:
id:
description: Unique UUID.
type: str
sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
name:
description: Name given to the load balancer.
type: str
sample: "lingxian_test"
vip_network_id:
description: Network ID the load balancer virtual IP port belongs in.
type: str
sample: "f171db43-56fd-41cf-82d7-4e91d741762e"
vip_subnet_id:
description: Subnet ID the load balancer virtual IP port belongs in.
type: str
sample: "c53e3c70-9d62-409a-9f71-db148e7aa853"
vip_port_id:
description: The load balancer virtual IP port ID.
type: str
sample: "2061395c-1c01-47ab-b925-c91b93df9c1d"
vip_address:
description: The load balancer virtual IP address.
type: str
sample: "192.168.2.88"
public_vip_address:
description: The load balancer public VIP address.
type: str
sample: "10.17.8.254"
provisioning_status:
description: The provisioning status of the load balancer.
type: str
sample: "ACTIVE"
operating_status:
description: The operating status of the load balancer.
type: str
sample: "ONLINE"
is_admin_state_up:
description: The administrative state of the load balancer.
type: bool
sample: true
listeners:
description: The associated listener IDs, if any.
type: list
sample: [{"id": "7aa1b380-beec-459c-a8a7-3a4fb6d30645"}, {"id": "692d06b8-c4f8-4bdb-b2a3-5a263cc23ba6"}]
pools:
description: The associated pool IDs, if any.
type: list
sample: [{"id": "27b78d92-cee1-4646-b831-e3b90a7fa714"}, {"id": "befc1fb5-1992-4697-bdb9-eee330989344"}]
'''
EXAMPLES = '''
# Create a load balancer by specifying the VIP subnet.
- os_loadbalancer:
auth:
auth_url: https://identity.example.com
username: admin
password: passme
project_name: admin
state: present
name: my_lb
vip_subnet: my_subnet
timeout: 150
# Create a load balancer by specifying the VIP network and the IP address.
- os_loadbalancer:
auth:
auth_url: https://identity.example.com
username: admin
password: passme
project_name: admin
state: present
name: my_lb
vip_network: my_network
vip_address: 192.168.0.11
# Create a load balancer together with its sub-resources in the 'all in one'
# way. A public IP address is also allocated to the load balancer VIP.
- os_loadbalancer:
auth:
auth_url: https://identity.example.com
username: admin
password: passme
project_name: admin
name: lingxian_test
state: present
vip_subnet: kong_subnet
auto_public_ip: yes
public_network: public
listeners:
- name: lingxian_80
protocol: TCP
protocol_port: 80
pool:
name: lingxian_80_pool
protocol: TCP
members:
- name: mywebserver1
address: 192.168.2.81
protocol_port: 80
subnet: webserver_subnet
- name: lingxian_8080
protocol: TCP
protocol_port: 8080
pool:
name: lingxian_8080-pool
protocol: TCP
members:
- name: mywebserver2
address: 192.168.2.82
protocol_port: 8080
wait: yes
timeout: 600
# Delete a load balancer(and all its related resources)
- os_loadbalancer:
auth:
auth_url: https://identity.example.com
username: admin
password: passme
project_name: admin
state: absent
name: my_lb
# Delete a load balancer(and all its related resources) together with the
# public IP address(if any) attached to it.
- os_loadbalancer:
auth:
auth_url: https://identity.example.com
username: admin
password: passme
project_name: admin
state: absent
name: my_lb
delete_public_ip: yes
'''
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, \
openstack_module_kwargs, openstack_cloud_from_module
def _wait_for_lb(module, cloud, lb, status, failures, interval=5):
    """Wait for load balancer to be in a particular provisioning status.

    :param module: AnsibleModule (supplies 'timeout' and fail_json/exit).
    :param cloud: openstacksdk cloud connection.
    :param lb: load balancer object to poll.
    :param status: provisioning status to wait for ("ACTIVE", "DELETED", ...).
    :param failures: list of provisioning statuses treated as fatal, or None.
    :param interval: seconds to sleep between polls.
    """
    timeout = module.params['timeout']
    # BUG FIX: remember the id up front.  ``lb`` is rebound by the lookup
    # below and becomes None once the balancer is gone, so the original
    # ``lb.id`` in the failure message raised AttributeError on None.
    lb_id = lb.id
    total_sleep = 0
    if failures is None:
        failures = []
    while total_sleep < timeout:
        lb = cloud.load_balancer.get_load_balancer(lb_id)
        if lb:
            if lb.provisioning_status == status:
                return None
            if lb.provisioning_status in failures:
                module.fail_json(
                    msg="Load Balancer %s transitioned to failure state %s" %
                        (lb_id, lb.provisioning_status)
                )
        else:
            # Not found any more: success if we were waiting for deletion,
            # fatal otherwise.
            if status == "DELETED":
                return None
            else:
                module.fail_json(
                    msg="Load Balancer %s transitioned to DELETED" % lb_id
                )
        time.sleep(interval)
        total_sleep += interval
    module.fail_json(
        msg="Timeout waiting for Load Balancer %s to transition to %s" %
        (lb_id, status)
    )
def main():
    """Entry point: create/delete an Octavia load balancer (optionally with
    listeners, pools, members and a floating VIP) per module parameters."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        state=dict(default='present', choices=['absent', 'present']),
        vip_network=dict(required=False),
        vip_subnet=dict(required=False),
        vip_port=dict(required=False),
        vip_address=dict(required=False),
        listeners=dict(type='list', default=[]),
        public_ip_address=dict(required=False, default=None),
        auto_public_ip=dict(required=False, default=False, type='bool'),
        public_network=dict(required=False),
        delete_public_ip=dict(required=False, default=False, type='bool'),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)
    sdk, cloud = openstack_cloud_from_module(module)
    vip_network = module.params['vip_network']
    vip_subnet = module.params['vip_subnet']
    vip_port = module.params['vip_port']
    listeners = module.params['listeners']
    public_vip_address = module.params['public_ip_address']
    allocate_fip = module.params['auto_public_ip']
    delete_fip = module.params['delete_public_ip']
    public_network = module.params['public_network']
    vip_network_id = None
    vip_subnet_id = None
    vip_port_id = None
    try:
        changed = False
        lb = cloud.load_balancer.find_load_balancer(
            name_or_id=module.params['name'])
        if module.params['state'] == 'present':
            # Create the balancer only if it does not exist yet; exactly
            # one of network/subnet/port must identify the VIP.
            if not lb:
                if not (vip_network or vip_subnet or vip_port):
                    module.fail_json(
                        msg="One of vip_network, vip_subnet, or vip_port must "
                            "be specified for load balancer creation"
                    )
                if vip_network:
                    network = cloud.get_network(vip_network)
                    if not network:
                        module.fail_json(
                            msg='network %s is not found' % vip_network
                        )
                    vip_network_id = network.id
                if vip_subnet:
                    subnet = cloud.get_subnet(vip_subnet)
                    if not subnet:
                        module.fail_json(
                            msg='subnet %s is not found' % vip_subnet
                        )
                    vip_subnet_id = subnet.id
                if vip_port:
                    port = cloud.get_port(vip_port)
                    if not port:
                        module.fail_json(
                            msg='port %s is not found' % vip_port
                        )
                    vip_port_id = port.id
                lb = cloud.load_balancer.create_load_balancer(
                    name=module.params['name'],
                    vip_network_id=vip_network_id,
                    vip_subnet_id=vip_subnet_id,
                    vip_port_id=vip_port_id,
                    vip_address=module.params['vip_address'],
                )
                changed = True
            # Fast exit: nothing more to configure and no wait requested.
            if not listeners and not module.params['wait']:
                module.exit_json(
                    changed=changed,
                    loadbalancer=lb.to_dict(),
                    id=lb.id
                )
            _wait_for_lb(module, cloud, lb, "ACTIVE", ["ERROR"])
            # Ensure each requested listener (and its pool/members) exists;
            # the LB must be ACTIVE again before every create call.
            for listener_def in listeners:
                listener_name = listener_def.get("name")
                pool_def = listener_def.get("pool")
                if not listener_name:
                    module.fail_json(msg='listener name is required')
                listener = cloud.load_balancer.find_listener(
                    name_or_id=listener_name
                )
                if not listener:
                    _wait_for_lb(module, cloud, lb, "ACTIVE", ["ERROR"])
                    protocol = listener_def.get("protocol", "HTTP")
                    protocol_port = listener_def.get("protocol_port", 80)
                    listener = cloud.load_balancer.create_listener(
                        name=listener_name,
                        loadbalancer_id=lb.id,
                        protocol=protocol,
                        protocol_port=protocol_port,
                    )
                    changed = True
                # Ensure pool in the listener.
                if pool_def:
                    pool_name = pool_def.get("name")
                    members = pool_def.get('members', [])
                    if not pool_name:
                        module.fail_json(msg='pool name is required')
                    pool = cloud.load_balancer.find_pool(name_or_id=pool_name)
                    if not pool:
                        _wait_for_lb(module, cloud, lb, "ACTIVE", ["ERROR"])
                        protocol = pool_def.get("protocol", "HTTP")
                        lb_algorithm = pool_def.get("lb_algorithm",
                                                    "ROUND_ROBIN")
                        pool = cloud.load_balancer.create_pool(
                            name=pool_name,
                            listener_id=listener.id,
                            protocol=protocol,
                            lb_algorithm=lb_algorithm
                        )
                        changed = True
                    # Ensure members in the pool
                    for member_def in members:
                        member_name = member_def.get("name")
                        if not member_name:
                            module.fail_json(msg='member name is required')
                        member = cloud.load_balancer.find_member(member_name,
                                                                 pool.id)
                        if not member:
                            _wait_for_lb(module, cloud, lb, "ACTIVE",
                                         ["ERROR"])
                            address = member_def.get("address")
                            if not address:
                                module.fail_json(
                                    msg='member address for member %s is '
                                        'required' % member_name
                                )
                            subnet_id = member_def.get("subnet")
                            if subnet_id:
                                subnet = cloud.get_subnet(subnet_id)
                                if not subnet:
                                    module.fail_json(
                                        msg='subnet %s for member %s is not '
                                            'found' % (subnet_id, member_name)
                                    )
                                subnet_id = subnet.id
                            protocol_port = member_def.get("protocol_port", 80)
                            member = cloud.load_balancer.create_member(
                                pool,
                                name=member_name,
                                address=address,
                                protocol_port=protocol_port,
                                subnet_id=subnet_id
                            )
                            changed = True
            # Associate public ip to the load balancer VIP. If
            # public_vip_address is provided, use that IP, otherwise, either
            # find an available public ip or create a new one.
            fip = None
            orig_public_ip = None
            new_public_ip = None
            if public_vip_address or allocate_fip:
                ips = cloud.network.ips(
                    port_id=lb.vip_port_id,
                    fixed_ip_address=lb.vip_address
                )
                ips = list(ips)
                if ips:
                    orig_public_ip = ips[0]
                    new_public_ip = orig_public_ip.floating_ip_address
            # NOTE(review): this compares the requested address string with
            # the floating-ip *object*; presumably it should compare against
            # orig_public_ip.floating_ip_address -- confirm before relying
            # on idempotence here.
            if public_vip_address and public_vip_address != orig_public_ip:
                fip = cloud.network.find_ip(public_vip_address)
                if not fip:
                    module.fail_json(
                        msg='Public IP %s is unavailable' % public_vip_address
                    )
                # Release origin public ip first
                cloud.network.update_ip(
                    orig_public_ip,
                    fixed_ip_address=None,
                    port_id=None
                )
                # Associate new public ip
                cloud.network.update_ip(
                    fip,
                    fixed_ip_address=lb.vip_address,
                    port_id=lb.vip_port_id
                )
                new_public_ip = public_vip_address
                changed = True
            elif allocate_fip and not orig_public_ip:
                # Reuse an unassigned floating IP if one exists, otherwise
                # allocate a fresh one from the given public network.
                fip = cloud.network.find_available_ip()
                if not fip:
                    if not public_network:
                        module.fail_json(msg="Public network is not provided")
                    pub_net = cloud.network.find_network(public_network)
                    if not pub_net:
                        module.fail_json(
                            msg='Public network %s not found' %
                                public_network
                        )
                    fip = cloud.network.create_ip(
                        floating_network_id=pub_net.id
                    )
                cloud.network.update_ip(
                    fip,
                    fixed_ip_address=lb.vip_address,
                    port_id=lb.vip_port_id
                )
                new_public_ip = fip.floating_ip_address
                changed = True
            # Include public_vip_address in the result.
            lb = cloud.load_balancer.find_load_balancer(name_or_id=lb.id)
            lb_dict = lb.to_dict()
            lb_dict.update({"public_vip_address": new_public_ip})
            module.exit_json(
                changed=changed,
                loadbalancer=lb_dict,
                id=lb.id
            )
        elif module.params['state'] == 'absent':
            changed = False
            public_vip_address = None
            if lb:
                if delete_fip:
                    # Remember the attached floating IP so it can be
                    # released after the balancer is gone.
                    ips = cloud.network.ips(
                        port_id=lb.vip_port_id,
                        fixed_ip_address=lb.vip_address
                    )
                    ips = list(ips)
                    if ips:
                        public_vip_address = ips[0]
                # Deleting load balancer with `cascade=False` does not make
                # sense because the deletion will always fail if there are
                # sub-resources.
                cloud.load_balancer.delete_load_balancer(lb, cascade=True)
                changed = True
                if module.params['wait']:
                    _wait_for_lb(module, cloud, lb, "DELETED", ["ERROR"])
            if delete_fip and public_vip_address:
                cloud.network.delete_ip(public_vip_address)
                changed = True
            module.exit_json(changed=changed)
    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e), extra_data=e.extra_data)
if __name__ == "__main__":
main()
| gpl-3.0 |
doganaltunbay/odoo | addons/website_twitter/models/twitter_config.py | 377 | 2095 | import logging
from openerp.osv import fields, osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class twitter_config_settings(osv.osv_memory):
    # Website settings wizard for the Twitter integration: exposes the
    # API credentials and screen name stored on the linked ``website``
    # record, and validates the credentials when the wizard is saved.
    _inherit = 'website.config.settings'
    _columns = {
        # All three fields are stored on the related website record.
        'twitter_api_key': fields.related(
            'website_id', 'twitter_api_key', type="char",
            string='Twitter API Key',
            help="Twitter API key you can get it from https://apps.twitter.com/app/new"),
        'twitter_api_secret': fields.related(
            'website_id', 'twitter_api_secret', type="char",
            string='Twitter API secret',
            help="Twitter API secret you can get it from https://apps.twitter.com/app/new"),
        # Dummy flag: only toggles the tutorial block in the settings view.
        'twitter_tutorial': fields.dummy(
            type="boolean", string="Show me how to obtain the Twitter API Key and Secret"),
        'twitter_screen_name': fields.related(
            'website_id', 'twitter_screen_name',
            type="char", string='Get favorites from this screen name',
            help="Screen Name of the Twitter Account from which you want to load favorites."
            "It does not have to match the API Key/Secret."),
    }
    def _check_twitter_authorization(self, cr, uid, config_id, context=None):
        # Attempt one favorites fetch with the configured credentials and
        # raise a user-facing error if Twitter rejects them.
        website_obj = self.pool['website']
        website_config = self.browse(cr, uid, config_id, context=context)
        try:
            website_obj.fetch_favorite_tweets(cr, uid, [website_config.website_id.id], context=context)
        except Exception:
            _logger.warning('Failed to verify twitter API authorization', exc_info=True)
            raise osv.except_osv(_('Twitter authorization error!'), _('Please double-check your Twitter API Key and Secret'))
    def create(self, cr, uid, vals, context=None):
        # Validate the credentials as soon as both key and secret are set.
        res_id = super(twitter_config_settings, self).create(cr, uid, vals, context=context)
        if vals.get('twitter_api_key') and vals.get('twitter_api_secret'):
            self._check_twitter_authorization(cr, uid, res_id, context=context)
        return res_id
marco-lancini/Showcase | dbindexer/base.py | 14 | 1891 | from django.conf import settings
from django.utils.importlib import import_module
def merge_dicts(d1, d2):
    """Recursively update ``d1`` with ``d2`` in place.

    Values from ``d2`` win; when both sides hold a dict for the same key
    the two dicts are merged recursively instead of overwritten.
    Returns None -- ``d1`` is mutated.
    """
    # Uses .items() (works on Python 2 and 3, unlike the old .iteritems())
    # and an explicit isinstance check: the previous EAFP recursion raised
    # an uncaught TypeError when a dict value replaced a non-subscriptable
    # scalar (e.g. merging {'a': {...}} over {'a': 1}).
    for key, value in d2.items():
        if isinstance(d1.get(key), dict) and isinstance(value, dict):
            merge_dicts(d1[key], value)
        else:
            d1[key] = value
class DatabaseOperations(object):
    # Mixin for a backend's DatabaseOperations: serves SQL compiler
    # classes that combine dbindexer's compiler (from the sibling
    # ``compiler`` module) with the target backend's own compiler.
    dbindexer_compiler_module = __name__.rsplit('.', 1)[0] + '.compiler'
    def __init__(self):
        # compiler name -> dynamically built combined Compiler class
        self._dbindexer_cache = {}
    def compiler(self, compiler_name):
        """Return the combined compiler class for ``compiler_name``,
        building and caching it on first use."""
        if compiler_name not in self._dbindexer_cache:
            # The backend's own compiler class, via the next class in
            # the MRO.
            target = super(DatabaseOperations, self).compiler(compiler_name)
            base = getattr(
                import_module(self.dbindexer_compiler_module), compiler_name)
            # dbindexer's compiler takes precedence over the target's.
            class Compiler(base, target):
                pass
            self._dbindexer_cache[compiler_name] = Compiler
        return self._dbindexer_cache[compiler_name]
class BaseDatabaseWrapper(object):
    # Mixin for a backend's DatabaseWrapper: rebuilds self.ops so its
    # class also inherits the DatabaseOperations mixin above.
    def __init__(self, *args, **kwargs):
        super(BaseDatabaseWrapper, self).__init__(*args, **kwargs)
        class Operations(DatabaseOperations, self.ops.__class__):
            pass
        # Swap the class of the already-constructed ops instance, then
        # call __init__ again: per the MRO that now resolves to
        # DatabaseOperations.__init__, which only creates the compiler
        # cache (it does not chain to super, so existing ops state
        # is preserved).
        self.ops.__class__ = Operations
        self.ops.__init__()
def DatabaseWrapper(settings_dict, *args, **kwargs):
    """Factory posing as a backend DatabaseWrapper class.

    ``settings_dict['TARGET']`` names the alias in ``settings.DATABASES``
    whose real backend is loaded; its DatabaseWrapper is combined with
    :class:`BaseDatabaseWrapper` so dbindexer hooks into every query.
    """
    target_settings = settings.DATABASES[settings_dict['TARGET']]
    engine = target_settings['ENGINE'] + '.base'
    target = import_module(engine).DatabaseWrapper

    class Wrapper(BaseDatabaseWrapper, target):
        pass

    # Update settings with target database settings (which can contain nested dicts).
    merge_dicts(settings_dict, target_settings)
    return Wrapper(settings_dict, *args, **kwargs)
| mit |
yceruto/django | tests/str/models.py | 38 | 1303 | # -*- coding: utf-8 -*-
"""
2. Adding __str__() or __unicode__() to models
Although it's not a strict requirement, each model should have a
``__str__()`` or ``__unicode__()`` method to return a "human-readable"
representation of the object. Do this not only for your own sanity when dealing
with the interactive prompt, but also because objects' representations are used
throughout Django's automatically-generated admin.
Normally, you should write ``__unicode__()`` method, since this will work for
all field types (and Django will automatically provide an appropriate
``__str__()`` method). However, you can write a ``__str__()`` method directly,
if you prefer. You must be careful to encode the results correctly, though.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Article(models.Model):
    """Model that defines ``__str__`` directly, without the
    ``python_2_unicode_compatible`` decorator (contrast with
    :class:`InternationalArticle` below)."""
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()

    def __str__(self):
        # Caution: this is only safe if you are certain that headline will be
        # in ASCII.
        return self.headline
@python_2_unicode_compatible
class InternationalArticle(models.Model):
    """Model using ``python_2_unicode_compatible`` so this ``__str__`` is
    safe for non-ASCII headlines on Python 2 as well (the decorator
    derives the byte-string representation automatically)."""
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()

    def __str__(self):
        return self.headline
| bsd-3-clause |
VanirAOSP/external_chromium_org | tools/telemetry/telemetry/core/backends/chrome/tracing_backend_unittest.py | 23 | 4266 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import json
import logging
import unittest
from telemetry.core import util
from telemetry.core.backends.chrome import tracing_backend
from telemetry.unittest import tab_test_case
class CategoryFilterTest(unittest.TestCase):
    """Tests for CategoryFilter.IsSubset.

    IsSubset returns True/False when the relationship can be decided,
    and None when wildcard patterns make it undecidable.
    """

    def testIsSubset(self):
        # Uses assertEqual rather than the deprecated assertEquals alias.

        # A None filter means "everything"; anything is a subset of it.
        b = tracing_backend.CategoryFilter(None)
        a = tracing_backend.CategoryFilter(None)
        self.assertEqual(a.IsSubset(b), True)

        b = tracing_backend.CategoryFilter(None)
        a = tracing_backend.CategoryFilter("test1,test2")
        self.assertEqual(a.IsSubset(b), True)

        b = tracing_backend.CategoryFilter(None)
        a = tracing_backend.CategoryFilter("-test1,-test2")
        self.assertEqual(a.IsSubset(b), True)

        # An unfiltered query can never be proven a subset of a filter.
        b = tracing_backend.CategoryFilter("test1,test2")
        a = tracing_backend.CategoryFilter(None)
        self.assertEqual(a.IsSubset(b), None)

        # Wildcards ('*' and '?') make the relation undecidable.
        b = tracing_backend.CategoryFilter(None)
        a = tracing_backend.CategoryFilter("test*")
        self.assertEqual(a.IsSubset(b), None)

        b = tracing_backend.CategoryFilter("test?")
        a = tracing_backend.CategoryFilter(None)
        self.assertEqual(a.IsSubset(b), None)

        b = tracing_backend.CategoryFilter("test1")
        a = tracing_backend.CategoryFilter("test1,test2")
        self.assertEqual(a.IsSubset(b), False)

        b = tracing_backend.CategoryFilter("-test1")
        a = tracing_backend.CategoryFilter("test1")
        self.assertEqual(a.IsSubset(b), False)

        # Order of categories must not matter.
        b = tracing_backend.CategoryFilter("test1,test2")
        a = tracing_backend.CategoryFilter("test2,test1")
        self.assertEqual(a.IsSubset(b), True)

        b = tracing_backend.CategoryFilter("-test1,-test2")
        a = tracing_backend.CategoryFilter("-test2")
        self.assertEqual(a.IsSubset(b), False)

        b = tracing_backend.CategoryFilter("disabled-by-default-test1")
        a = tracing_backend.CategoryFilter(
            "disabled-by-default-test1,disabled-by-default-test2")
        self.assertEqual(a.IsSubset(b), False)
class TracingBackendTest(tab_test_case.TabTestCase):
    """Smoke test that the browser's tracing backend can start and stop."""

    def _StartServer(self):
        # Serve the unittest data dir so pages can be loaded over HTTP.
        self._browser.SetHTTPServerDirectories(util.GetUnittestDataDir())

    def _WaitForAnimationFrame(self):
        # Polls the page-global `done` flag (set by the test page's
        # requestAnimationFrame callback) for up to 5 seconds.
        def _IsDone():
            js_is_done = """done"""
            return bool(self._tab.EvaluateJavaScript(js_is_done))
        util.WaitFor(_IsDone, 5)

    def testGotTrace(self):
        # Tracing support is optional; skip silently when unavailable.
        if not self._browser.supports_tracing:
            logging.warning('Browser does not support tracing, skipping test.')
            return
        self._StartServer()
        self._browser.StartTracing()
        self._browser.StopTracing()
        # TODO(tengs): check model for correctness after trace_event_importer
        # is implemented (crbug.com/173327).
class ChromeTraceResultTest(unittest.TestCase):
    """Runs each test against every ChromeTraceResult implementation."""

    # Override TestCase.run to run a test with all possible
    # implementations of ChromeTraceResult.
    def __init__(self, method_name):
        self._chromeTraceResultClass = None
        super(ChromeTraceResultTest, self).__init__(method_name)

    def run(self, result=None):
        def ChromeRawTraceResultWrapper(strings):
            return tracing_backend.ChromeRawTraceResult(map(json.loads, strings))
        classes = [
            tracing_backend.ChromeLegacyTraceResult,
            ChromeRawTraceResultWrapper
        ]
        for cls in classes:
            self._chromeTraceResultClass = cls
            super(ChromeTraceResultTest, self).run(result)

    def _SerializedEvents(self, strings):
        """Build a trace result from `strings`, serialize it, and return
        the decoded 'traceEvents' list.

        Shared by all testWrite* cases; replaces three copies of the
        same serialize/parse boilerplate."""
        ri = self._chromeTraceResultClass(strings)
        f = cStringIO.StringIO()
        ri.Serialize(f)
        j = json.loads(f.getvalue())
        # assertIn (not a bare assert, which -O would strip) gives a
        # useful failure message too.
        self.assertIn('traceEvents', j)
        return j['traceEvents']

    def testWrite1(self):
        self.assertEqual(self._SerializedEvents([]), [])

    def testWrite2(self):
        self.assertEqual(self._SerializedEvents(['"foo"', '"bar"']),
                         ['foo', 'bar'])

    def testWrite3(self):
        self.assertEqual(self._SerializedEvents(['"foo"', '"bar"', '"baz"']),
                         ['foo', 'bar', 'baz'])
| bsd-3-clause |
QiJune/Paddle | python/paddle/fluid/tests/unittests/test_weight_normalization.py | 7 | 4587 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy
import collections
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.initializer import ConstantInitializer
from paddle.fluid.param_attr import WeightNormParamAttr
class TestWeightNormalization(unittest.TestCase):
    """Checks fluid's weight-normalized FC layer against a NumPy reference."""

    batch_size = 3
    hidden_size = 5
    # One (name, shape, lod_level) triple per input variable.
    data_desc = (['x', [10], 0], )

    @classmethod
    def setUpClass(cls):
        cls.set_program()

    @classmethod
    def set_program(cls):
        """Build the program once: an FC layer whose weight uses
        WeightNormParamAttr (v initialized to all ones), a reduce_sum
        loss, and the backward pass."""
        data = fluid.layers.data(
            name=cls.data_desc[0][0], shape=cls.data_desc[0][1])
        out = fluid.layers.fc(input=data,
                              size=cls.hidden_size,
                              param_attr=WeightNormParamAttr(
                                  dim=None,
                                  name='weight_norm_param',
                                  initializer=ConstantInitializer(1.0)),
                              bias_attr=False,
                              act=None)
        loss = fluid.layers.reduce_sum(out)
        fluid.backward.append_backward(loss=loss)
        # Fetch the norm scale g, direction v, and g's gradient.
        cls.fetch_list = [
            'weight_norm_param_g', 'weight_norm_param_v',
            'weight_norm_param_g@GRAD'
        ]

    def run_program(self):
        """Run the program on CPU (and CUDA when available), collecting
        the fetched tensors per place into self.actual_outputs."""
        outputs = []
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        for place in places:
            self.set_inputs(place)
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            output = exe.run(fluid.default_main_program(),
                             feed=self.inputs,
                             fetch_list=self.fetch_list,
                             return_numpy=False)
            outputs.append(output)
        self.actual_outputs = outputs

    def set_data(self):
        """Generate random input values (and random LoD when the input
        declares lod levels) for every entry in data_desc."""
        self.data = collections.OrderedDict()
        for desc in self.data_desc:
            data_name = desc[0]
            data_shape = desc[1]
            data_lod_level = desc[2]
            data_lod = []
            for i in range(data_lod_level):
                # Level 0 has batch_size sequences; deeper levels are
                # sized by the sum of the previous level's lengths.
                lod_level_i = numpy.random.randint(
                    low=1,
                    high=5,
                    size=self.batch_size
                    if i == 0 else sum(lod_level_i)).tolist()
                data_lod.append(lod_level_i)
            data_value = numpy.random.random(
                size=[sum(data_lod[-1]) if data_lod else self.batch_size
                      ] + data_shape).astype('float32')
            self.data[data_name] = (data_value, data_lod)

    def set_inputs(self, place):
        """Wrap the generated numpy data into fluid Tensors on `place`."""
        self.inputs = {}
        for desc in self.data_desc:
            tensor = fluid.Tensor()
            tensor.set(self.data[desc[0]][0], place)
            if self.data[desc[0]][1]:
                tensor.set_recursive_sequence_lengths(self.data[desc[0]][1])
            self.inputs[desc[0]] = tensor

    def weight_normalize(self):
        """NumPy reference: with v = ones, w = g * v / ||v||, returns the
        expected (g, v, g_grad) for comparison against fluid's output."""
        v = numpy.ones((self.data[self.data_desc[0][0]][0].shape[-1],
                        self.hidden_size))
        g = numpy.linalg.norm(v, axis=None, keepdims=True)
        w = g * v / numpy.linalg.norm(v, axis=None, keepdims=True)
        x = self.data[self.data_desc[0][0]][0]
        out = numpy.dot(x, w)
        # d(loss)/dg with loss = sum(out): chain rule through w = g*v/||v||.
        g_grad = (numpy.dot(x.T, numpy.ones_like(out)) * (v / numpy.linalg.norm(
            v, axis=None, keepdims=True))).sum(axis=None, keepdims=True)
        return g, v, g_grad

    def test_weight_normalization(self):
        self.set_data()
        self.run_program()
        expect_output = self.weight_normalize()
        for actual_output in self.actual_outputs:
            [
                self.assertTrue(
                    numpy.allclose(
                        numpy.array(actual), expect, atol=0.001))
                for expect, actual in zip(expect_output, actual_output)
            ]
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
xq262144/hue | desktop/core/ext-py/boto-2.46.1/boto/dynamodb/layer2.py | 135 | 33814 | # Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.dynamodb.layer1 import Layer1
from boto.dynamodb.table import Table
from boto.dynamodb.schema import Schema
from boto.dynamodb.item import Item
from boto.dynamodb.batch import BatchList, BatchWriteList
from boto.dynamodb.types import get_dynamodb_type, Dynamizer, \
LossyFloatDynamizer, NonBooleanDynamizer
class TableGenerator(object):
    """
    This is an object that wraps up the table_generator function.
    The only real reason to have this is that we want to be able
    to accumulate and return the ConsumedCapacityUnits element that
    is part of each response.

    :ivar last_evaluated_key: A sequence representing the key(s)
        of the item last evaluated, or None if no additional
        results are available.

    :ivar remaining: The remaining quantity of results requested.

    :ivar table: The table to which the call was made.
    """

    def __init__(self, table, callable, remaining, item_class, kwargs):
        # `callable` is the Layer1 method (query or scan) invoked for
        # each page; `kwargs` are forwarded on every request.
        self.table = table
        self.callable = callable
        # -1 encodes "no overall limit requested".
        self.remaining = -1 if remaining is None else remaining
        self.item_class = item_class
        self.kwargs = kwargs
        self._consumed_units = 0.0
        self.last_evaluated_key = None
        self._count = 0
        self._scanned_count = 0
        # Lazily populated by the `response` property / next_response().
        self._response = None

    @property
    def count(self):
        """
        The total number of items retrieved thus far. This value changes with
        iteration and even when issuing a call with count=True, it is necessary
        to complete the iteration to assert an accurate count value.
        """
        self.response  # ensure at least one request has been issued
        return self._count

    @property
    def scanned_count(self):
        """
        As above, but representing the total number of items scanned by
        DynamoDB, without regard to any filters.
        """
        self.response  # ensure at least one request has been issued
        return self._scanned_count

    @property
    def consumed_units(self):
        """
        Returns a float representing the ConsumedCapacityUnits accumulated.
        """
        self.response  # ensure at least one request has been issued
        return self._consumed_units

    @property
    def response(self):
        """
        The current response to the call from DynamoDB.
        """
        return self.next_response() if self._response is None else self._response

    def next_response(self):
        """
        Issue a call and return the result. You can invoke this method
        while iterating over the TableGenerator in order to skip to the
        next "page" of results.
        """
        # preserve any existing limit in case the user alters self.remaining
        limit = self.kwargs.get('limit')
        if (self.remaining > 0 and (limit is None or limit > self.remaining)):
            self.kwargs['limit'] = self.remaining
        self._response = self.callable(**self.kwargs)
        self.kwargs['limit'] = limit
        self._consumed_units += self._response.get('ConsumedCapacityUnits', 0.0)
        self._count += self._response.get('Count', 0)
        self._scanned_count += self._response.get('ScannedCount', 0)
        # at the expense of a possibly gratuitous dynamize, ensure that
        # early generator termination won't result in bad LEK values
        if 'LastEvaluatedKey' in self._response:
            lek = self._response['LastEvaluatedKey']
            esk = self.table.layer2.dynamize_last_evaluated_key(lek)
            self.kwargs['exclusive_start_key'] = esk
            lektuple = (lek['HashKeyElement'],)
            if 'RangeKeyElement' in lek:
                lektuple += (lek['RangeKeyElement'],)
            self.last_evaluated_key = lektuple
        else:
            self.last_evaluated_key = None
        return self._response

    def __iter__(self):
        """Yield items across pages until exhausted or `remaining` hits 0.

        The `response is not self._response` checks detect that the
        caller advanced pages manually (via next_response()) mid-loop,
        in which case iteration restarts on the fresh page.
        """
        while self.remaining != 0:
            response = self.response
            for item in response.get('Items', []):
                self.remaining -= 1
                yield self.item_class(self.table, attrs=item)
                if self.remaining == 0:
                    break
                if response is not self._response:
                    break
            else:
                # Page fully consumed: fetch the next page if one exists.
                if self.last_evaluated_key is not None:
                    self.next_response()
                    continue
                break
            if response is not self._response:
                continue
            break
class Layer2(object):
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
             is_secure=True, port=None, proxy=None, proxy_port=None,
             debug=0, security_token=None, region=None,
             validate_certs=True, dynamizer=LossyFloatDynamizer,
             profile_name=None):
    """Create a high-level DynamoDB connection.

    All connection parameters are passed straight through to Layer1;
    `dynamizer` is a class controlling Python <-> DynamoDB type
    conversion (instantiated here).
    """
    self.layer1 = Layer1(aws_access_key_id, aws_secret_access_key,
                         is_secure, port, proxy, proxy_port,
                         debug, security_token, region,
                         validate_certs=validate_certs,
                         profile_name=profile_name)
    self.dynamizer = dynamizer()
def use_decimals(self, use_boolean=False):
    """
    Use the ``decimal.Decimal`` type for encoding/decoding numeric types.

    By default, ints/floats are used to represent numeric types
    ('N', 'NS') received from DynamoDB.  Using the ``Decimal``
    type is recommended to prevent loss of precision.
    """
    # Eventually this should be made the default dynamizer.
    if use_boolean:
        self.dynamizer = Dynamizer()
    else:
        self.dynamizer = NonBooleanDynamizer()
def dynamize_attribute_updates(self, pending_updates):
    """
    Convert a set of pending item updates into the structure
    required by Layer1.
    """
    layer1_updates = {}
    for name, (action, value) in pending_updates.items():
        update = {"Action": action}
        # A DELETE without an attribute value omits "Value" entirely.
        if value is not None:
            update["Value"] = self.dynamizer.encode(value)
        layer1_updates[name] = update
    return layer1_updates
def dynamize_item(self, item):
    """Encode every attribute of *item* into the Layer1 wire format."""
    return {attr_name: self.dynamizer.encode(item[attr_name])
            for attr_name in item}
def dynamize_range_key_condition(self, range_key_condition):
    """
    Convert a layer2 range_key_condition parameter into the
    structure required by Layer1.
    """
    # Condition objects know how to serialize themselves.
    return range_key_condition.to_dict()
def dynamize_scan_filter(self, scan_filter):
    """
    Convert a layer2 scan_filter parameter into the
    structure required by Layer1.

    Returns None when no filter was supplied.
    """
    if not scan_filter:
        return None
    return {attr_name: condition.to_dict()
            for attr_name, condition in scan_filter.items()}
def dynamize_expected_value(self, expected_value):
    """
    Convert an expected_value parameter into the data structure
    required for Layer1.
    """
    if not expected_value:
        return None
    dynamized = {}
    for attr_name, attr_value in expected_value.items():
        # Booleans express an existence expectation rather than a value.
        if attr_value is True or attr_value is False:
            dynamized[attr_name] = {'Exists': attr_value}
        else:
            dynamized[attr_name] = {
                'Value': self.dynamizer.encode(attr_value)}
    return dynamized
def dynamize_last_evaluated_key(self, last_evaluated_key):
    """
    Convert a last_evaluated_key parameter into the data structure
    required for Layer1.
    """
    if not last_evaluated_key:
        return None
    encoded = {
        'HashKeyElement': self.dynamizer.encode(
            last_evaluated_key['HashKeyElement']),
    }
    # The range key is present only for tables with a composite key.
    if 'RangeKeyElement' in last_evaluated_key:
        encoded['RangeKeyElement'] = self.dynamizer.encode(
            last_evaluated_key['RangeKeyElement'])
    return encoded
def build_key_from_values(self, schema, hash_key, range_key=None):
    """
    Build a Key structure to be used for accessing items
    in Amazon DynamoDB.  This method takes the supplied hash_key
    and optional range_key and validates them against the
    schema.  If there is a mismatch, a TypeError is raised.
    Otherwise, a Python dict version of a Amazon DynamoDB Key
    data structure is returned.

    :type hash_key: int|float|str|unicode|Binary
    :param hash_key: The hash key of the item you are looking for.
        The type of the hash key should match the type defined in
        the schema.

    :type range_key: int|float|str|unicode|Binary
    :param range_key: The range key of the item your are looking for.
        This should be supplied only if the schema requires a
        range key.  The type of the range key should match the
        type defined in the schema.
    """
    key = {}
    encoded_hash = self.dynamizer.encode(hash_key)
    # The encoded form is a single-entry dict whose key is the type code.
    if next(iter(encoded_hash)) != schema.hash_key_type:
        msg = 'Hashkey must be of type: %s' % schema.hash_key_type
        raise TypeError(msg)
    key['HashKeyElement'] = encoded_hash
    if range_key is not None:
        encoded_range = self.dynamizer.encode(range_key)
        if next(iter(encoded_range)) != schema.range_key_type:
            msg = 'RangeKey must be of type: %s' % schema.range_key_type
            raise TypeError(msg)
        key['RangeKeyElement'] = encoded_range
    return key
def new_batch_list(self):
    """
    Return a new, empty :class:`boto.dynamodb.batch.BatchList`
    object.  The list is bound to this Layer2 instance.
    """
    return BatchList(self)
def new_batch_write_list(self):
    """
    Return a new, empty :class:`boto.dynamodb.batch.BatchWriteList`
    object.  The list is bound to this Layer2 instance.
    """
    return BatchWriteList(self)
def list_tables(self, limit=None):
    """
    Return a list of the names of all tables associated with the
    current account and region.

    :type limit: int
    :param limit: The maximum number of tables to return.
    """
    table_names = []
    start_table = None
    # Note: a falsy limit (None or 0) means "no limit" here.
    while not limit or len(table_names) < limit:
        if limit:
            # The service caps each page at 100 tables per request.
            batch_limit = min(limit - len(table_names), 100)
        else:
            batch_limit = None
        result = self.layer1.list_tables(limit=batch_limit,
                                         start_table=start_table)
        table_names.extend(result.get('TableNames', []))
        start_table = result.get('LastEvaluatedTableName', None)
        if not start_table:
            break
    return table_names
def describe_table(self, name):
    """
    Retrieve information about an existing table.

    :type name: str
    :param name: The name of the desired table.
    """
    # Pass straight through to Layer1; the raw response dict is returned.
    return self.layer1.describe_table(name)
def table_from_schema(self, name, schema):
    """
    Create a Table object from a schema.

    This method will create a Table object without
    making any API calls.  If you know the name and schema
    of the table, you can use this method instead of
    ``get_table``.

    Example usage::

        table = layer2.table_from_schema(
            'tablename',
            Schema.create(hash_key=('foo', 'N')))

    :type name: str
    :param name: The name of the table.

    :type schema: :class:`boto.dynamodb.schema.Schema`
    :param schema: The schema associated with the table.

    :rtype: :class:`boto.dynamodb.table.Table`
    :return: A Table object representing the table.
    """
    return Table.create_from_schema(self, name, schema)
def get_table(self, name):
    """
    Retrieve the Table object for an existing table.

    :type name: str
    :param name: The name of the desired table.

    :rtype: :class:`boto.dynamodb.table.Table`
    :return: A Table object representing the table.
    """
    response = self.layer1.describe_table(name)
    return Table(self, response)

# Backwards-compatible alias for get_table.
lookup = get_table
def create_table(self, name, schema, read_units, write_units):
    """
    Create a new Amazon DynamoDB table.

    :type name: str
    :param name: The name of the desired table.

    :type schema: :class:`boto.dynamodb.schema.Schema`
    :param schema: The Schema object that defines the schema used
        by this table.

    :type read_units: int
    :param read_units: The value for ReadCapacityUnits.

    :type write_units: int
    :param write_units: The value for WriteCapacityUnits.

    :rtype: :class:`boto.dynamodb.table.Table`
    :return: A Table object representing the new Amazon DynamoDB table.
    """
    response = self.layer1.create_table(name, schema.dict,
                                        {'ReadCapacityUnits': read_units,
                                         'WriteCapacityUnits': write_units})
    return Table(self, response)
def update_throughput(self, table, read_units, write_units):
    """
    Update the ProvisionedThroughput for the Amazon DynamoDB Table.

    :type table: :class:`boto.dynamodb.table.Table`
    :param table: The Table object whose throughput is being updated.

    :type read_units: int
    :param read_units: The new value for ReadCapacityUnits.

    :type write_units: int
    :param write_units: The new value for WriteCapacityUnits.
    """
    response = self.layer1.update_table(table.name,
                                        {'ReadCapacityUnits': read_units,
                                         'WriteCapacityUnits': write_units})
    # Refresh the Table object's cached state from the service response.
    table.update_from_response(response)
def delete_table(self, table):
    """
    Delete this table and all items in it.  After calling this
    the Table objects status attribute will be set to 'DELETING'.

    :type table: :class:`boto.dynamodb.table.Table`
    :param table: The Table object that is being deleted.
    """
    response = self.layer1.delete_table(table.name)
    # Refresh the Table object's cached state (status becomes DELETING).
    table.update_from_response(response)
def create_schema(self, hash_key_name, hash_key_proto_value,
                  range_key_name=None, range_key_proto_value=None):
    """
    Create a Schema object used when creating a Table.

    :type hash_key_name: str
    :param hash_key_name: The name of the HashKey for the schema.

    :type hash_key_proto_value: int|long|float|str|unicode|Binary
    :param hash_key_proto_value: A sample or prototype of the type
        of value you want to use for the HashKey.  Alternatively,
        you can also just pass in the Python type (e.g. int, float, etc.).

    :type range_key_name: str
    :param range_key_name: The name of the RangeKey for the schema.
        This parameter is optional.

    :type range_key_proto_value: int|long|float|str|unicode|Binary
    :param range_key_proto_value: A sample or prototype of the type
        of value you want to use for the RangeKey.  Alternatively,
        you can also pass in the Python type (e.g. int, float, etc.)
        This parameter is optional.
    """
    hash_key = (hash_key_name, get_dynamodb_type(hash_key_proto_value))
    # Only build a range key when both its name and prototype were given.
    range_key = None
    if range_key_name and range_key_proto_value is not None:
        range_key = (range_key_name,
                     get_dynamodb_type(range_key_proto_value))
    return Schema.create(hash_key, range_key)
def get_item(self, table, hash_key, range_key=None,
             attributes_to_get=None, consistent_read=False,
             item_class=Item):
    """
    Retrieve an existing item from the table.

    :type table: :class:`boto.dynamodb.table.Table`
    :param table: The Table object from which the item is retrieved.

    :type hash_key: int|long|float|str|unicode|Binary
    :param hash_key: The HashKey of the requested item.  The
        type of the value must match the type defined in the
        schema for the table.

    :type range_key: int|long|float|str|unicode|Binary
    :param range_key: The optional RangeKey of the requested item.
        The type of the value must match the type defined in the
        schema for the table.

    :type attributes_to_get: list
    :param attributes_to_get: A list of attribute names.
        If supplied, only the specified attribute names will
        be returned.  Otherwise, all attributes will be returned.

    :type consistent_read: bool
    :param consistent_read: If True, a consistent read
        request is issued.  Otherwise, an eventually consistent
        request is issued.

    :type item_class: Class
    :param item_class: Allows you to override the class used
        to generate the items. This should be a subclass of
        :class:`boto.dynamodb.item.Item`
    """
    key = self.build_key_from_values(table.schema, hash_key, range_key)
    response = self.layer1.get_item(table.name, key,
                                    attributes_to_get, consistent_read,
                                    object_hook=self.dynamizer.decode)
    item = item_class(table, hash_key, range_key, response['Item'])
    # Record the capacity cost of the read when the service reports it.
    if 'ConsumedCapacityUnits' in response:
        item.consumed_units = response['ConsumedCapacityUnits']
    return item
def batch_get_item(self, batch_list):
    """
    Return a set of attributes for a multiple items in
    multiple tables using their primary keys.

    :type batch_list: :class:`boto.dynamodb.batch.BatchList`
    :param batch_list: A BatchList object which consists of a
        list of :class:`boto.dynamodb.batch.Batch` objects.
        Each Batch object contains the information about one
        batch of objects that you wish to retrieve in this
        request.
    """
    request_items = batch_list.to_dict()
    return self.layer1.batch_get_item(request_items,
                                      object_hook=self.dynamizer.decode)
def batch_write_item(self, batch_list):
    """
    Performs multiple Puts and Deletes in one batch.

    :type batch_list: :class:`boto.dynamodb.batch.BatchWriteList`
    :param batch_list: A BatchWriteList object which consists of a
        list of :class:`boto.dynamodb.batch.BatchWrite` objects.
        Each Batch object contains the information about one
        batch of objects that you wish to put or delete.
    """
    request_items = batch_list.to_dict()
    return self.layer1.batch_write_item(request_items,
                                        object_hook=self.dynamizer.decode)
def put_item(self, item, expected_value=None, return_values=None):
    """
    Store a new item or completely replace an existing item
    in Amazon DynamoDB.

    :type item: :class:`boto.dynamodb.item.Item`
    :param item: The Item to write to Amazon DynamoDB.

    :type expected_value: dict
    :param expected_value: A dictionary of name/value pairs that you expect.
        This dictionary should have name/value pairs where the name
        is the name of the attribute and the value is either the value
        you are expecting or False if you expect the attribute not to
        exist.

    :type return_values: str
    :param return_values: Controls the return of attribute
        name-value pairs before they were changed.  Possible
        values are: None or 'ALL_OLD'. If 'ALL_OLD' is
        specified and the item is overwritten, the content
        of the old item is returned.
    """
    expected_value = self.dynamize_expected_value(expected_value)
    response = self.layer1.put_item(item.table.name,
                                    self.dynamize_item(item),
                                    expected_value, return_values,
                                    object_hook=self.dynamizer.decode)
    # Record the capacity cost of the write when the service reports it.
    if 'ConsumedCapacityUnits' in response:
        item.consumed_units = response['ConsumedCapacityUnits']
    return response
def update_item(self, item, expected_value=None, return_values=None):
    """
    Commit pending item updates to Amazon DynamoDB.

    :type item: :class:`boto.dynamodb.item.Item`
    :param item: The Item to update in Amazon DynamoDB.  It is expected
        that you would have called the add_attribute, put_attribute
        and/or delete_attribute methods on this Item prior to calling
        this method.  Those queued changes are what will be updated.

    :type expected_value: dict
    :param expected_value: A dictionary of name/value pairs that you
        expect.  This dictionary should have name/value pairs where the
        name is the name of the attribute and the value is either the
        value you are expecting or False if you expect the attribute
        not to exist.

    :type return_values: str
    :param return_values: Controls the return of attribute name/value pairs
        before they were updated. Possible values are: None, 'ALL_OLD',
        'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is
        specified and the item is overwritten, the content of the old item
        is returned. If 'ALL_NEW' is specified, then all the attributes of
        the new version of the item are returned. If 'UPDATED_NEW' is
        specified, the new versions of only the updated attributes are
        returned.
    """
    expected_value = self.dynamize_expected_value(expected_value)
    key = self.build_key_from_values(item.table.schema,
                                     item.hash_key, item.range_key)
    attr_updates = self.dynamize_attribute_updates(item._updates)

    response = self.layer1.update_item(item.table.name, key,
                                       attr_updates,
                                       expected_value, return_values,
                                       object_hook=self.dynamizer.decode)
    # The queued updates have been applied; clear them so a subsequent
    # call does not re-send them.
    item._updates.clear()
    if 'ConsumedCapacityUnits' in response:
        item.consumed_units = response['ConsumedCapacityUnits']
    return response
def delete_item(self, item, expected_value=None, return_values=None):
    """
    Delete the item from Amazon DynamoDB.

    :type item: :class:`boto.dynamodb.item.Item`
    :param item: The Item to delete from Amazon DynamoDB.

    :type expected_value: dict
    :param expected_value: A dictionary of name/value pairs that you expect.
        This dictionary should have name/value pairs where the name
        is the name of the attribute and the value is either the value
        you are expecting or False if you expect the attribute not to
        exist.

    :type return_values: str
    :param return_values: Controls the return of attribute
        name-value pairs before they were changed.  Possible
        values are: None or 'ALL_OLD'. If 'ALL_OLD' is
        specified and the item is overwritten, the content
        of the old item is returned.
    """
    expected_value = self.dynamize_expected_value(expected_value)
    key = self.build_key_from_values(item.table.schema,
                                     item.hash_key, item.range_key)
    return self.layer1.delete_item(item.table.name, key,
                                   expected=expected_value,
                                   return_values=return_values,
                                   object_hook=self.dynamizer.decode)
def query(self, table, hash_key, range_key_condition=None,
          attributes_to_get=None, request_limit=None,
          max_results=None, consistent_read=False,
          scan_index_forward=True, exclusive_start_key=None,
          item_class=Item, count=False):
    """
    Perform a query on the table.

    :type table: :class:`boto.dynamodb.table.Table`
    :param table: The Table object that is being queried.

    :type hash_key: int|long|float|str|unicode|Binary
    :param hash_key: The HashKey of the requested item.  The
        type of the value must match the type defined in the
        schema for the table.

    :type range_key_condition: :class:`boto.dynamodb.condition.Condition`
    :param range_key_condition: A Condition object.
        Condition object can be one of the following types:

        EQ|LE|LT|GE|GT|BEGINS_WITH|BETWEEN

        The only condition which expects or will accept two
        values is 'BETWEEN', otherwise a single value should
        be passed to the Condition constructor.

    :type attributes_to_get: list
    :param attributes_to_get: A list of attribute names.
        If supplied, only the specified attribute names will
        be returned.  Otherwise, all attributes will be returned.

    :type request_limit: int
    :param request_limit: The maximum number of items to retrieve
        from Amazon DynamoDB on each request.  You may want to set
        a specific request_limit based on the provisioned throughput
        of your table.  The default behavior is to retrieve as many
        results as possible per request.

    :type max_results: int
    :param max_results: The maximum number of results that will
        be retrieved from Amazon DynamoDB in total.  For example,
        if you only wanted to see the first 100 results from the
        query, regardless of how many were actually available, you
        could set max_results to 100 and the generator returned
        from the query method will only yield 100 results max.

    :type consistent_read: bool
    :param consistent_read: If True, a consistent read
        request is issued.  Otherwise, an eventually consistent
        request is issued.

    :type scan_index_forward: bool
    :param scan_index_forward: Specified forward or backward
        traversal of the index.  Default is forward (True).

    :type count: bool
    :param count: If True, Amazon DynamoDB returns a total
        number of items for the Query operation, even if the
        operation has no matching items for the assigned filter.
        If count is True, the actual items are not returned and
        the count is accessible as the ``count`` attribute of
        the returned object.

    :type exclusive_start_key: list or tuple
    :param exclusive_start_key: Primary key of the item from
        which to continue an earlier query.  This would be
        provided as the LastEvaluatedKey in that query.

    :type item_class: Class
    :param item_class: Allows you to override the class used
        to generate the items. This should be a subclass of
        :class:`boto.dynamodb.item.Item`

    :rtype: :class:`boto.dynamodb.layer2.TableGenerator`
    """
    if range_key_condition:
        rkc = self.dynamize_range_key_condition(range_key_condition)
    else:
        rkc = None
    if exclusive_start_key:
        esk = self.build_key_from_values(table.schema,
                                         *exclusive_start_key)
    else:
        esk = None
    # The kwargs are replayed by the TableGenerator for each page.
    kwargs = {'table_name': table.name,
              'hash_key_value': self.dynamizer.encode(hash_key),
              'range_key_conditions': rkc,
              'attributes_to_get': attributes_to_get,
              'limit': request_limit,
              'count': count,
              'consistent_read': consistent_read,
              'scan_index_forward': scan_index_forward,
              'exclusive_start_key': esk,
              'object_hook': self.dynamizer.decode}
    return TableGenerator(table, self.layer1.query,
                          max_results, item_class, kwargs)
def scan(self, table, scan_filter=None,
attributes_to_get=None, request_limit=None, max_results=None,
exclusive_start_key=None, item_class=Item, count=False):
"""
Perform a scan of DynamoDB.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object that is being scanned.
:type scan_filter: A dict
:param scan_filter: A dictionary where the key is the
attribute name and the value is a
:class:`boto.dynamodb.condition.Condition` object.
Valid Condition objects include:
* EQ - equal (1)
* NE - not equal (1)
* LE - less than or equal (1)
* LT - less than (1)
* GE - greater than or equal (1)
* GT - greater than (1)
* NOT_NULL - attribute exists (0, use None)
* NULL - attribute does not exist (0, use None)
* CONTAINS - substring or value in list (1)
* NOT_CONTAINS - absence of substring or value in list (1)
* BEGINS_WITH - substring prefix (1)
* IN - exact match in list (N)
* BETWEEN - >= first value, <= second value (2)
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type request_limit: int
:param request_limit: The maximum number of items to retrieve
from Amazon DynamoDB on each request. You may want to set
a specific request_limit based on the provisioned throughput
of your table. The default behavior is to retrieve as many
results as possible per request.
:type max_results: int
:param max_results: The maximum number of results that will
be retrieved from Amazon DynamoDB in total. For example,
if you only wanted to see the first 100 results from the
query, regardless of how many were actually available, you
could set max_results to 100 and the generator returned
from the query method will only yeild 100 results max.
:type count: bool
:param count: If True, Amazon DynamoDB returns a total
number of items for the Scan operation, even if the
operation has no matching items for the assigned filter.
If count is True, the actual items are not returned and
the count is accessible as the ``count`` attribute of
the returned object.
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
:rtype: :class:`boto.dynamodb.layer2.TableGenerator`
"""
if exclusive_start_key:
esk = self.build_key_from_values(table.schema,
*exclusive_start_key)
else:
esk = None
kwargs = {'table_name': table.name,
'scan_filter': self.dynamize_scan_filter(scan_filter),
'attributes_to_get': attributes_to_get,
'limit': request_limit,
'count': count,
'exclusive_start_key': esk,
'object_hook': self.dynamizer.decode}
return TableGenerator(table, self.layer1.scan,
max_results, item_class, kwargs)
| apache-2.0 |
clovett/MissionPlanner | LogAnalyzer/tests/TestMotorBalance.py | 215 | 2265 | from LogAnalyzer import Test,TestResult
import DataflashLog
class TestBalanceTwist(Test):
	'''test for badly unbalanced copter, including yaw twist'''
	# NOTE: this module is Python 2 only (backtick repr, list-returning
	# map/filter/zip, integer division semantics).
	def __init__(self):
		Test.__init__(self)
		self.name = "Motor Balance"
	def run(self, logdata, verbose):
		# Compare the average PWM output of each motor channel; a large
		# spread between the lowest and highest motor average suggests a
		# badly unbalanced (or twisted) airframe.
		self.result = TestResult()
		self.result.status = TestResult.StatusType.GOOD
		if logdata.vehicleType != "ArduCopter":
			self.result.status = TestResult.StatusType.NA
			return
		self.result.status = TestResult.StatusType.UNKNOWN
		if not "RCOU" in logdata.channels:
			return
		# Collect per-motor output series; older logs name the fields
		# "ChanN", newer ones "ChN".
		ch = []
		for i in range(8):
			if "Chan"+`(i+1)` in logdata.channels["RCOU"]:
				ch.append(map(lambda x: x[1], logdata.channels["RCOU"]["Chan"+`(i+1)`].listData))
			elif "Ch"+`(i+1)` in logdata.channels["RCOU"]:
				ch.append(map(lambda x: x[1], logdata.channels["RCOU"]["Ch"+`(i+1)`].listData))
		# Transpose to one tuple per log sample, then drop values outside
		# the plausible PWM range (0, 3000) within each sample.
		ch = zip(*ch)
		num_channels = 0
		for i in range(len(ch)):
			ch[i] = filter(lambda x: (x>0 and x<3000), ch[i])
			if num_channels < len(ch[i]):
				num_channels = len(ch[i])
		if num_channels < 2:
			return
		# NOTE(review): operator precedence makes this
		# RC3_MIN + (THR_MIN / (RC3_MAX-RC3_MIN) / 1000.0); the intent was
		# presumably RC3_MIN + THR_MIN*(RC3_MAX-RC3_MIN)/1000.0 — confirm
		# against upstream before changing behavior.
		min_throttle = logdata.parameters["RC3_MIN"] + logdata.parameters["THR_MIN"] / (logdata.parameters["RC3_MAX"]-logdata.parameters["RC3_MIN"])/1000.0
		# Keep only samples where the craft is actually throttled up.
		ch = filter(lambda x:sum(x)/num_channels > min_throttle, ch)
		if len(ch) == 0:
			return
		# Overall average output across all motors and samples.
		avg_all = map(lambda x:sum(x)/num_channels,ch)
		avg_all = sum(avg_all)/len(avg_all)
		# Per-motor average output.
		avg_ch = []
		for i in range(num_channels):
			avg = map(lambda x: x[i],ch)
			avg = sum(avg)/len(avg)
			avg_ch.append(avg)
		self.result.statusMessage = "Motor channel averages = %s\nAverage motor output = %.0f\nDifference between min and max motor averages = %.0f" % (str(avg_ch),avg_all,abs(min(avg_ch)-max(avg_ch)))
		# Spread thresholds (PWM us): >75 warns, >150 fails.
		self.result.status = TestResult.StatusType.GOOD
		if abs(min(avg_ch)-max(avg_ch)) > 75:
			self.result.status = TestResult.StatusType.WARN
		if abs(min(avg_ch)-max(avg_ch)) > 150:
			self.result.status = TestResult.StatusType.FAIL
| gpl-3.0 |
collabspot/muninn | lib/cssselect/xpath.py | 7 | 25022 | # coding: utf8
"""
cssselect.xpath
===============
Translation of parsed CSS selectors to XPath expressions.
:copyright: (c) 2007-2012 Ian Bicking and contributors.
See AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import re
from parser import parse, parse_series, SelectorError
# Python 2/3 compatibility aliases: ``basestring`` and ``unicode`` do not
# exist on Python 3, where ``str`` covers both roles.
if sys.version_info[0] < 3:
    _basestring = basestring
    _unicode = unicode
else:
    _basestring = str
    _unicode = str
def _unicode_safe_getattr(obj, name, default=None):
# getattr() with a non-ASCII name fails on Python 2.x
name = name.encode('ascii', 'replace').decode('ascii')
return getattr(obj, name, default)
# Raised by the translators below when a selector parses correctly but
# cannot be expressed in XPath 1.0 (e.g. unknown pseudo-classes or
# pseudo-elements).
class ExpressionError(SelectorError, RuntimeError):
    """Unknown or unsupported selector (eg. pseudo-class)."""
#### XPath Helpers
class XPathExpr(object):
    """A mutable XPath expression built from three parts:

    ``path`` (axis/steps prefix), ``element`` (node test) and an
    optional ``condition`` rendered as a trailing ``[...]`` predicate.
    """

    def __init__(self, path='', element='*', condition='', star_prefix=False):
        # ``star_prefix`` is accepted for API compatibility but unused.
        self.path = path
        self.element = element
        self.condition = condition

    def __str__(self):
        rendered = _unicode(self.path) + _unicode(self.element)
        if self.condition:
            rendered += '[%s]' % self.condition
        return rendered

    def __repr__(self):
        return '%s[%s]' % (self.__class__.__name__, self)

    def add_condition(self, condition):
        # AND the new predicate onto any existing one; return self so
        # calls can be chained.
        if self.condition:
            self.condition = '%s and (%s)' % (self.condition, condition)
        else:
            self.condition = condition
        return self

    def add_name_test(self):
        # A '*' node test matches everything already; nothing to do.
        if self.element == '*':
            return
        # Otherwise move the element name into a name() predicate so the
        # node test itself can become '*'.
        self.add_condition(
            "name() = %s" % GenericTranslator.xpath_literal(self.element))
        self.element = '*'

    def add_star_prefix(self):
        """
        Append '*/' to the path to keep the context constrained
        to a single parent.
        """
        self.path += '*/'

    def join(self, combiner, other):
        # Merge ``other`` onto this expression using ``combiner`` as the
        # joining axis; ``other``'s element/condition become ours.
        joined = _unicode(self) + combiner
        # Any "star prefix" is redundant when joining.
        if other.path != '*/':
            joined += other.path
        self.path = joined
        self.element = other.element
        self.condition = other.condition
        return self
# Splits on runs of single quotes, keeping the quote runs as separate
# items (used by xpath_literal to build concat() expressions).
split_at_single_quotes = re.compile("('+)").split
# The spec is actually more permissive than that, but don’t bother.
# This is just for the fast path.
# http://www.w3.org/TR/REC-xml/#NT-NameStartChar
is_safe_name = re.compile('^[a-zA-Z_][a-zA-Z0-9_.-]*$').match
# Test that the string is not empty and does not contain whitespace
is_non_whitespace = re.compile(r'^[^ \t\r\n\f]+$').match
#### Translation
class GenericTranslator(object):
    """
    Translator for "generic" XML documents.
    Everything is case-sensitive, no assumption is made on the meaning
    of element names and attribute names.
    """
    ####
    #### HERE BE DRAGONS
    ####
    #### You are welcome to hook into this to change some behavior,
    #### but do so at your own risks.
    #### Until it has received a lot more work and review,
    #### I reserve the right to change this API in backward-incompatible ways
    #### with any minor version of cssselect.
    #### See https://github.com/SimonSapin/cssselect/pull/22
    #### -- Simon Sapin.
    ####
    # Maps CSS combinator tokens to the xpath_*_combinator methods below.
    combinator_mapping = {
        ' ': 'descendant',
        '>': 'child',
        '+': 'direct_adjacent',
        '~': 'indirect_adjacent',
    }
    # Maps attribute-selector operators to the xpath_attrib_* methods below.
    attribute_operator_mapping = {
        'exists': 'exists',
        '=': 'equals',
        '~=': 'includes',
        '|=': 'dashmatch',
        '^=': 'prefixmatch',
        '$=': 'suffixmatch',
        '*=': 'substringmatch',
        '!=': 'different',  # XXX Not in Level 3 but meh
    }
    #: The attribute used for ID selectors depends on the document language:
    #: http://www.w3.org/TR/selectors/#id-selectors
    id_attribute = 'id'
    #: The attribute used for ``:lang()`` depends on the document language:
    #: http://www.w3.org/TR/selectors/#lang-pseudo
    lang_attribute = 'xml:lang'
    #: The case sensitivity of document language element names,
    #: attribute names, and attribute values in selectors depends
    #: on the document language.
    #: http://www.w3.org/TR/selectors/#casesens
    #:
    #: When a document language defines one of these as case-insensitive,
    #: cssselect assumes that the document parser makes the parsed values
    #: lower-case. Making the selector lower-case too makes the comparison
    #: case-insensitive.
    #:
    #: In HTML, element names and attributes names (but not attribute values)
    #: are case-insensitive. All of lxml.html, html5lib, BeautifulSoup4
    #: and HTMLParser make them lower-case in their parse result, so
    #: the assumption holds.
    lower_case_element_names = False
    lower_case_attribute_names = False
    lower_case_attribute_values = False
    # class used to represent and xpath expression
    xpathexpr_cls = XPathExpr
    def css_to_xpath(self, css, prefix='descendant-or-self::'):
        """Translate a *group of selectors* to XPath.
        Pseudo-elements are not supported here since XPath only knows
        about "real" elements.
        :param css:
            A *group of selectors* as an Unicode string.
        :param prefix:
            This string is prepended to the XPath expression for each selector.
            The default makes selectors scoped to the context node’s subtree.
        :raises:
            :class:`SelectorSyntaxError` on invalid selectors,
            :class:`ExpressionError` on unknown/unsupported selectors,
            including pseudo-elements.
        :returns:
            The equivalent XPath 1.0 expression as an Unicode string.
        """
        return ' | '.join(self.selector_to_xpath(selector, prefix,
                                                 translate_pseudo_elements=True)
                          for selector in parse(css))
    def selector_to_xpath(self, selector, prefix='descendant-or-self::',
                          translate_pseudo_elements=False):
        """Translate a parsed selector to XPath.
        :param selector:
            A parsed :class:`Selector` object.
        :param prefix:
            This string is prepended to the resulting XPath expression.
            The default makes selectors scoped to the context node’s subtree.
        :param translate_pseudo_elements:
            Unless this is set to ``True`` (as :meth:`css_to_xpath` does),
            the :attr:`~Selector.pseudo_element` attribute of the selector
            is ignored.
            It is the caller's responsibility to reject selectors
            with pseudo-elements, or to account for them somehow.
        :raises:
            :class:`ExpressionError` on unknown/unsupported selectors.
        :returns:
            The equivalent XPath 1.0 expression as an Unicode string.
        """
        tree = getattr(selector, 'parsed_tree', None)
        if not tree:
            raise TypeError('Expected a parsed selector, got %r' % (selector,))
        xpath = self.xpath(tree)
        assert isinstance(xpath, self.xpathexpr_cls)  # help debug a missing 'return'
        if translate_pseudo_elements and selector.pseudo_element:
            xpath = self.xpath_pseudo_element(xpath, selector.pseudo_element)
        return (prefix or '') + _unicode(xpath)
    def xpath_pseudo_element(self, xpath, pseudo_element):
        """Translate a pseudo-element.
        Defaults to not supporting pseudo-elements at all,
        but can be overridden by sub-classes.
        """
        raise ExpressionError('Pseudo-elements are not supported.')
    @staticmethod
    def xpath_literal(s):
        # Quote a string as an XPath 1.0 literal.  XPath has no escape
        # mechanism, so a string containing both quote kinds must be
        # assembled with concat().
        s = _unicode(s)
        if "'" not in s:
            s = "'%s'" % s
        elif '"' not in s:
            s = '"%s"' % s
        else:
            s = "concat(%s)" % ','.join([
                (("'" in part) and '"%s"' or "'%s'") % part
                for part in split_at_single_quotes(s) if part
            ])
        return s
    def xpath(self, parsed_selector):
        """Translate any parsed selector object."""
        # Dispatch on the parsed node's class name, e.g. a CombinedSelector
        # instance is handled by xpath_combinedselector().
        type_name = type(parsed_selector).__name__
        method = getattr(self, 'xpath_%s' % type_name.lower(), None)
        if method is None:
            raise ExpressionError('%s is not supported.' % type_name)
        return method(parsed_selector)
    # Dispatched by parsed object type
    def xpath_combinedselector(self, combined):
        """Translate a combined selector."""
        combinator = self.combinator_mapping[combined.combinator]
        method = getattr(self, 'xpath_%s_combinator' % combinator)
        return method(self.xpath(combined.selector),
                      self.xpath(combined.subselector))
    def xpath_negation(self, negation):
        """Translate a :not() negation selector."""
        xpath = self.xpath(negation.selector)
        sub_xpath = self.xpath(negation.subselector)
        sub_xpath.add_name_test()
        if sub_xpath.condition:
            return xpath.add_condition('not(%s)' % sub_xpath.condition)
        else:
            # :not(*) can never match; '0' is an always-false predicate.
            return xpath.add_condition('0')
    def xpath_function(self, function):
        """Translate a functional pseudo-class."""
        method = 'xpath_%s_function' % function.name.replace('-', '_')
        method = _unicode_safe_getattr(self, method, None)
        if not method:
            raise ExpressionError(
                "The pseudo-class :%s() is unknown" % function.name)
        return method(self.xpath(function.selector), function)
    def xpath_pseudo(self, pseudo):
        """Translate a pseudo-class."""
        method = 'xpath_%s_pseudo' % pseudo.ident.replace('-', '_')
        method = _unicode_safe_getattr(self, method, None)
        if not method:
            # TODO: better error message for pseudo-elements?
            raise ExpressionError(
                "The pseudo-class :%s is unknown" % pseudo.ident)
        return method(self.xpath(pseudo.selector))
    def xpath_attrib(self, selector):
        """Translate an attribute selector."""
        operator = self.attribute_operator_mapping[selector.operator]
        method = getattr(self, 'xpath_attrib_%s' % operator)
        if self.lower_case_attribute_names:
            name = selector.attrib.lower()
        else:
            name = selector.attrib
        safe = is_safe_name(name)
        if selector.namespace:
            name = '%s:%s' % (selector.namespace, name)
            safe = safe and is_safe_name(selector.namespace)
        if safe:
            attrib = '@' + name
        else:
            # Unsafe names must be matched via name() instead of @name.
            attrib = 'attribute::*[name() = %s]' % self.xpath_literal(name)
        if self.lower_case_attribute_values:
            value = selector.value.lower()
        else:
            value = selector.value
        return method(self.xpath(selector.selector), attrib, value)
    def xpath_class(self, class_selector):
        """Translate a class selector."""
        # .foo is defined as [class~=foo] in the spec.
        xpath = self.xpath(class_selector.selector)
        return self.xpath_attrib_includes(
            xpath, '@class', class_selector.class_name)
    def xpath_hash(self, id_selector):
        """Translate an ID selector."""
        xpath = self.xpath(id_selector.selector)
        return self.xpath_attrib_equals(xpath, '@id', id_selector.id)
    def xpath_element(self, selector):
        """Translate a type or universal selector."""
        element = selector.element
        if not element:
            element = '*'
            safe = True
        else:
            safe = is_safe_name(element)
            if self.lower_case_element_names:
                element = element.lower()
        if selector.namespace:
            # Namespace prefixes are case-sensitive.
            # http://www.w3.org/TR/css3-namespace/#prefixes
            element = '%s:%s' % (selector.namespace, element)
            safe = safe and is_safe_name(selector.namespace)
        xpath = self.xpathexpr_cls(element=element)
        if not safe:
            xpath.add_name_test()
        return xpath
    # CombinedSelector: dispatch by combinator
    def xpath_descendant_combinator(self, left, right):
        """right is a child, grand-child or further descendant of left"""
        return left.join('/descendant-or-self::*/', right)
    def xpath_child_combinator(self, left, right):
        """right is an immediate child of left"""
        return left.join('/', right)
    def xpath_direct_adjacent_combinator(self, left, right):
        """right is a sibling immediately after left"""
        xpath = left.join('/following-sibling::', right)
        xpath.add_name_test()
        return xpath.add_condition('position() = 1')
    def xpath_indirect_adjacent_combinator(self, left, right):
        """right is a sibling after left, immediately or not"""
        return left.join('/following-sibling::', right)
    # Function: dispatch by function/pseudo-class name
    def xpath_nth_child_function(self, xpath, function, last=False,
                                 add_name_test=True):
        """Shared helper for :nth-child() and its last/of-type variants.
        Translates an ``an+b`` series into position() predicates."""
        try:
            a, b = parse_series(function.arguments)
        except ValueError:
            raise ExpressionError("Invalid series: '%r'" % function.arguments)
        if add_name_test:
            xpath.add_name_test()
        xpath.add_star_prefix()
        if a == 0:
            # 0n+b: a single fixed position.
            if last:
                b = 'last() - %s' % b
            return xpath.add_condition('position() = %s' % b)
        if last:
            # FIXME: I'm not sure if this is right
            a = -a
            b = -b
        if b > 0:
            b_neg = str(-b)
        else:
            b_neg = '+%s' % (-b)
        if a != 1:
            expr = ['(position() %s) mod %s = 0' % (b_neg, a)]
        else:
            expr = []
        if b >= 0:
            expr.append('position() >= %s' % b)
        elif b < 0 and last:
            expr.append('position() < (last() %s)' % b)
        expr = ' and '.join(expr)
        if expr:
            xpath.add_condition(expr)
        return xpath
        # FIXME: handle an+b, odd, even
        # an+b means every-a, plus b, e.g., 2n+1 means odd
        # 0n+b means b
        # n+0 means a=1, i.e., all elements
        # an means every a elements, i.e., 2n means even
        # -n means -1n
        # -1n+6 means elements 6 and previous
    def xpath_nth_last_child_function(self, xpath, function):
        return self.xpath_nth_child_function(xpath, function, last=True)
    def xpath_nth_of_type_function(self, xpath, function):
        if xpath.element == '*':
            raise ExpressionError(
                "*:nth-of-type() is not implemented")
        return self.xpath_nth_child_function(xpath, function,
                                             add_name_test=False)
    def xpath_nth_last_of_type_function(self, xpath, function):
        if xpath.element == '*':
            raise ExpressionError(
                "*:nth-of-type() is not implemented")
        return self.xpath_nth_child_function(xpath, function, last=True,
                                             add_name_test=False)
    def xpath_contains_function(self, xpath, function):
        # Defined there, removed in later drafts:
        # http://www.w3.org/TR/2001/CR-css3-selectors-20011113/#content-selectors
        if function.argument_types() not in (['STRING'], ['IDENT']):
            raise ExpressionError(
                "Expected a single string or ident for :contains(), got %r"
                % function.arguments)
        value = function.arguments[0].value
        return xpath.add_condition(
            'contains(., %s)' % self.xpath_literal(value))
    def xpath_lang_function(self, xpath, function):
        if function.argument_types() not in (['STRING'], ['IDENT']):
            raise ExpressionError(
                "Expected a single string or ident for :lang(), got %r"
                % function.arguments)
        value = function.arguments[0].value
        return xpath.add_condition(
            "lang(%s)" % (self.xpath_literal(value)))
    # Pseudo: dispatch by pseudo-class name
    def xpath_root_pseudo(self, xpath):
        return xpath.add_condition("not(parent::*)")
    def xpath_first_child_pseudo(self, xpath):
        xpath.add_star_prefix()
        xpath.add_name_test()
        return xpath.add_condition('position() = 1')
    def xpath_last_child_pseudo(self, xpath):
        xpath.add_star_prefix()
        xpath.add_name_test()
        return xpath.add_condition('position() = last()')
    def xpath_first_of_type_pseudo(self, xpath):
        if xpath.element == '*':
            raise ExpressionError(
                "*:first-of-type is not implemented")
        xpath.add_star_prefix()
        return xpath.add_condition('position() = 1')
    def xpath_last_of_type_pseudo(self, xpath):
        if xpath.element == '*':
            raise ExpressionError(
                "*:last-of-type is not implemented")
        xpath.add_star_prefix()
        return xpath.add_condition('position() = last()')
    def xpath_only_child_pseudo(self, xpath):
        xpath.add_name_test()
        xpath.add_star_prefix()
        return xpath.add_condition('last() = 1')
    def xpath_only_of_type_pseudo(self, xpath):
        if xpath.element == '*':
            raise ExpressionError(
                "*:only-of-type is not implemented")
        return xpath.add_condition('last() = 1')
    def xpath_empty_pseudo(self, xpath):
        return xpath.add_condition("not(*) and not(string-length())")
    def pseudo_never_matches(self, xpath):
        """Common implementation for pseudo-classes that never match."""
        return xpath.add_condition("0")
    # Dynamic (user-interaction / link-state) pseudo-classes can never
    # match in a static document tree.
    xpath_link_pseudo = pseudo_never_matches
    xpath_visited_pseudo = pseudo_never_matches
    xpath_hover_pseudo = pseudo_never_matches
    xpath_active_pseudo = pseudo_never_matches
    xpath_focus_pseudo = pseudo_never_matches
    xpath_target_pseudo = pseudo_never_matches
    xpath_enabled_pseudo = pseudo_never_matches
    xpath_disabled_pseudo = pseudo_never_matches
    xpath_checked_pseudo = pseudo_never_matches
    # Attrib: dispatch by attribute operator
    def xpath_attrib_exists(self, xpath, name, value):
        assert not value
        xpath.add_condition(name)
        return xpath
    def xpath_attrib_equals(self, xpath, name, value):
        xpath.add_condition('%s = %s' % (name, self.xpath_literal(value)))
        return xpath
    def xpath_attrib_different(self, xpath, name, value):
        # FIXME: this seems like a weird hack...
        if value:
            xpath.add_condition('not(%s) or %s != %s'
                                % (name, name, self.xpath_literal(value)))
        else:
            xpath.add_condition('%s != %s'
                                % (name, self.xpath_literal(value)))
        return xpath
    def xpath_attrib_includes(self, xpath, name, value):
        if is_non_whitespace(value):
            xpath.add_condition(
                "%s and contains(concat(' ', normalize-space(%s), ' '), %s)"
                % (name, name, self.xpath_literal(' '+value+' ')))
        else:
            # Empty or whitespace-containing value: [attr~=v] never matches.
            xpath.add_condition('0')
        return xpath
    def xpath_attrib_dashmatch(self, xpath, name, value):
        # Weird, but true...
        xpath.add_condition('%s and (%s = %s or starts-with(%s, %s))' % (
            name,
            name, self.xpath_literal(value),
            name, self.xpath_literal(value + '-')))
        return xpath
    def xpath_attrib_prefixmatch(self, xpath, name, value):
        if value:
            xpath.add_condition('%s and starts-with(%s, %s)' % (
                name, name, self.xpath_literal(value)))
        else:
            xpath.add_condition('0')
        return xpath
    def xpath_attrib_suffixmatch(self, xpath, name, value):
        if value:
            # Oddly there is a starts-with in XPath 1.0, but not ends-with
            xpath.add_condition(
                '%s and substring(%s, string-length(%s)-%s) = %s'
                % (name, name, name, len(value)-1, self.xpath_literal(value)))
        else:
            xpath.add_condition('0')
        return xpath
    def xpath_attrib_substringmatch(self, xpath, name, value):
        if value:
            # Attribute selectors are case sensitive
            xpath.add_condition('%s and contains(%s, %s)' % (
                name, name, self.xpath_literal(value)))
        else:
            xpath.add_condition('0')
        return xpath
class HTMLTranslator(GenericTranslator):
    """
    Translator for (X)HTML documents.
    Has a more useful implementation of some pseudo-classes based on
    HTML-specific element names and attribute names, as described in
    the `HTML5 specification`_. It assumes no-quirks mode.
    The API is the same as :class:`GenericTranslator`.
    .. _HTML5 specification: http://www.w3.org/TR/html5/links.html#selectors
    :param xhtml:
        If false (the default), element names and attribute names
        are case-insensitive.
    """
    # HTML uses a plain 'lang' attribute rather than 'xml:lang'.
    lang_attribute = 'lang'
    def __init__(self, xhtml=False):
        self.xhtml = xhtml  # Might be useful for sub-classes?
        if not xhtml:
            # See their definition in GenericTranslator.
            self.lower_case_element_names = True
            self.lower_case_attribute_names = True
    def xpath_checked_pseudo(self, xpath):
        # FIXME: is this really all the elements?
        return xpath.add_condition(
            "(@selected and name(.) = 'option') or "
            "(@checked "
            "and (name(.) = 'input' or name(.) = 'command')"
            "and (@type = 'checkbox' or @type = 'radio'))")
    def xpath_lang_function(self, xpath, function):
        """Translate :lang(), matching the nearest ancestor-or-self lang
        attribute case-insensitively against the given prefix."""
        if function.argument_types() not in (['STRING'], ['IDENT']):
            raise ExpressionError(
                "Expected a single string or ident for :lang(), got %r"
                % function.arguments)
        value = function.arguments[0].value
        return xpath.add_condition(
            "ancestor-or-self::*[@lang][1][starts-with(concat("
            # XPath 1.0 has no lower-case function...
            "translate(@%s, 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', "
            "'abcdefghijklmnopqrstuvwxyz'), "
            "'-'), %s)]"
            % (self.lang_attribute, self.xpath_literal(value.lower() + '-')))
    def xpath_link_pseudo(self, xpath):
        return xpath.add_condition("@href and "
            "(name(.) = 'a' or name(.) = 'link' or name(.) = 'area')")
    # Links are never visited, the implementation for :visited is the same
    # as in GenericTranslator
    def xpath_disabled_pseudo(self, xpath):
        # http://www.w3.org/TR/html5/section-index.html#attributes-1
        return xpath.add_condition('''
        (
            @disabled and
            (
                (name(.) = 'input' and @type != 'hidden') or
                name(.) = 'button' or
                name(.) = 'select' or
                name(.) = 'textarea' or
                name(.) = 'command' or
                name(.) = 'fieldset' or
                name(.) = 'optgroup' or
                name(.) = 'option'
            )
        ) or (
            (
                (name(.) = 'input' and @type != 'hidden') or
                name(.) = 'button' or
                name(.) = 'select' or
                name(.) = 'textarea'
            )
            and ancestor::fieldset[@disabled]
        )
        ''')
        # FIXME: in the second half, add "and is not a descendant of that
        # fieldset element's first legend element child, if any."
    def xpath_enabled_pseudo(self, xpath):
        # http://www.w3.org/TR/html5/section-index.html#attributes-1
        return xpath.add_condition('''
        (
            @href and (
                name(.) = 'a' or
                name(.) = 'link' or
                name(.) = 'area'
            )
        ) or (
            (
                name(.) = 'command' or
                name(.) = 'fieldset' or
                name(.) = 'optgroup'
            )
            and not(@disabled)
        ) or (
            (
                (name(.) = 'input' and @type != 'hidden') or
                name(.) = 'button' or
                name(.) = 'select' or
                name(.) = 'textarea' or
                name(.) = 'keygen'
            )
            and not (@disabled or ancestor::fieldset[@disabled])
        ) or (
            name(.) = 'option' and not(
                @disabled or ancestor::optgroup[@disabled]
            )
        )
        ''')
        # FIXME: ... or "li elements that are children of menu elements,
        # and that have a child element that defines a command, if the first
        # such element's Disabled State facet is false (not disabled)".
        # FIXME: after ancestor::fieldset[@disabled], add "and is not a
        # descendant of that fieldset element's first legend element child,
        # if any."
| mit |
harlequin/sickbeard | lib/growl/gntp.py | 150 | 12750 | import re
import hashlib
import time
import platform
__version__ = '0.1'
# Root of the gntp exception hierarchy; each concrete subclass below knows
# how to render itself as an encoded GNTP -ERROR response.
class BaseError(Exception):
	pass
class ParseError(BaseError):
	"""Raised when an incoming GNTP message cannot be parsed."""
	def gntp_error(self):
		# Render this failure as an encoded GNTP -ERROR response.
		error = GNTPError(errorcode=500,errordesc='Error parsing the message')
		return error.encode()
class AuthError(BaseError):
	"""Raised when a GNTP message fails password/hash validation."""
	def gntp_error(self):
		# Render this failure as an encoded GNTP -ERROR response.
		error = GNTPError(errorcode=400,errordesc='Error with authorization')
		return error.encode()
class UnsupportedError(BaseError):
	"""Raised for GNTP features this module does not implement."""
	def gntp_error(self):
		# Render this failure as an encoded GNTP -ERROR response.
		error = GNTPError(errorcode=500,errordesc='Currently unsupported by gntp.py')
		return error.encode()
class _GNTPBase(object):
	"""Shared base for GNTP messages: header storage, password hashing,
	wire-format parsing and encoding.  (Python 2 only: uses the ``print``
	statement and ``dict.iteritems``.)"""
	def __init__(self,messagetype):
		# messagetype is one of REGISTER|NOTIFY|SUBSCRIBE|-OK|-ERROR.
		self.info = {
			'version':'1.0',
			'messagetype':messagetype,
			'encryptionAlgorithmID':None
		}
		self.requiredHeaders = []
		self.headers = {}
	def add_origin_info(self):
		# Attach standard Origin-* headers describing this sender.
		self.add_header('Origin-Machine-Name',platform.node())
		self.add_header('Origin-Software-Name','gntp.py')
		self.add_header('Origin-Software-Version',__version__)
		self.add_header('Origin-Platform-Name',platform.system())
		self.add_header('Origin-Platform-Version',platform.platform())
	def send(self):
		# Debug helper: prints the encoded message (does no network I/O).
		print self.encode()
	def __str__(self):
		return self.encode()
	def parse_info(self,data):
		'''
		Parse the first line of a GNTP message to get security and other info values
		@param data: GNTP Message
		@return: GNTP Message information in a dictionary
		'''
		#GNTP/<version> <messagetype> <encryptionAlgorithmID>[:<ivValue>][ <keyHashAlgorithmID>:<keyHash>.<salt>]
		match = re.match('GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)'+
				' (?P<encryptionAlgorithmID>[A-Z0-9]+(:(?P<ivValue>[A-F0-9]+))?) ?'+
				'((?P<keyHashAlgorithmID>[A-Z0-9]+):(?P<keyHash>[A-F0-9]+).(?P<salt>[A-F0-9]+))?\r\n', data,re.IGNORECASE)
		if not match:
			raise ParseError('ERROR_PARSING_INFO_LINE')
		info = match.groupdict()
		if info['encryptionAlgorithmID'] == 'NONE':
			info['encryptionAlgorithmID'] = None
		return info
	def set_password(self,password,encryptAlgo='MD5'):
		'''
		Set a password for a GNTP Message
		@param password: Null to clear password
		@param encryptAlgo: Currently only supports MD5
		@todo: Support other hash functions
		'''
		self.password = password
		if not password:
			self.info['encryptionAlgorithmID'] = None
			# NOTE(review): the rest of the class reads/writes
			# 'keyHashAlgorithmID'; this key name looks inconsistent —
			# confirm before relying on it.
			self.info['keyHashAlgorithm'] = None;
			return
		password = password.encode('utf8')
		# Key derivation per the GNTP spec: salt from the current time,
		# key = MD5(password + raw salt), keyHash = MD5(key).
		seed = time.ctime()
		salt = hashlib.md5(seed).hexdigest()
		saltHash = hashlib.md5(seed).digest()
		keyBasis = password+saltHash
		key = hashlib.md5(keyBasis).digest()
		keyHash = hashlib.md5(key).hexdigest()
		self.info['keyHashAlgorithmID'] = encryptAlgo.upper()
		self.info['keyHash'] = keyHash.upper()
		self.info['salt'] = salt.upper()
	def _decode_hex(self,value):
		'''
		Helper function to decode hex string to `proper` hex string
		@param value: Value to decode
		@return: Hex string
		'''
		# Converts e.g. '4142' into the raw byte string 'AB'.
		result = ''
		for i in range(0,len(value),2):
			tmp = int(value[i:i+2],16)
			result += chr(tmp)
		return result
	def _decode_binary(self,rawIdentifier,identifier):
		# Extract a binary resource section of identifier['Length'] bytes
		# that follows the given raw identifier block in self.raw.
		rawIdentifier += '\r\n\r\n'
		dataLength = int(identifier['Length'])
		pointerStart = self.raw.find(rawIdentifier)+len(rawIdentifier)
		pointerEnd = pointerStart + dataLength
		data = self.raw[pointerStart:pointerEnd]
		if not len(data) == dataLength:
			raise ParseError('INVALID_DATA_LENGTH Expected: %s Recieved %s'%(dataLength,len(data)))
		return data
	def validate_password(self,password):
		'''
		Validate GNTP Message against stored password
		'''
		self.password = password
		# NOTE(review): raising here for password=None makes the
		# "keyHash is None and self.password is None" branch below
		# unreachable — confirm intent.
		if password == None: raise Exception()
		keyHash = self.info.get('keyHash',None)
		if keyHash is None and self.password is None:
			return True
		if keyHash is None:
			raise AuthError('Invalid keyHash')
		if self.password is None:
			raise AuthError('Missing password')
		# Recompute the key hash from the message salt and compare.
		password = self.password.encode('utf8')
		saltHash = self._decode_hex(self.info['salt'])
		keyBasis = password+saltHash
		key = hashlib.md5(keyBasis).digest()
		keyHash = hashlib.md5(key).hexdigest()
		if not keyHash.upper() == self.info['keyHash'].upper():
			raise AuthError('Invalid Hash')
		return True
	def validate(self):
		'''
		Verify required headers
		'''
		for header in self.requiredHeaders:
			if not self.headers.get(header,False):
				raise ParseError('Missing Notification Header: '+header)
	def format_info(self):
		'''
		Generate info line for GNTP Message
		@return: Info line string
		'''
		info = u'GNTP/%s %s'%(
			self.info.get('version'),
			self.info.get('messagetype'),
		)
		if self.info.get('encryptionAlgorithmID',None):
			info += ' %s:%s'%(
				self.info.get('encryptionAlgorithmID'),
				self.info.get('ivValue'),
			)
		else:
			info+=' NONE'
		if self.info.get('keyHashAlgorithmID',None):
			info += ' %s:%s.%s'%(
				self.info.get('keyHashAlgorithmID'),
				self.info.get('keyHash'),
				self.info.get('salt')
			)
		return info
	def parse_dict(self,data):
		'''
		Helper function to parse blocks of GNTP headers into a dictionary
		@param data:
		@return: Dictionary of headers
		'''
		dict = {}
		for line in data.split('\r\n'):
			match = re.match('([\w-]+):(.+)', line)
			if not match: continue
			key = match.group(1).strip()
			val = match.group(2).strip()
			dict[key] = val
			#print key,'\t\t\t',val
		return dict
	def add_header(self,key,value):
		# Set (or overwrite) a single GNTP header.
		self.headers[key] = value
	def decode(self,data,password=None):
		'''
		Decode GNTP Message
		@param data:
		'''
		self.password = password
		self.raw = data
		# Sections of a GNTP message are separated by blank lines.
		parts = self.raw.split('\r\n\r\n')
		self.info = self.parse_info(data)
		self.headers = self.parse_dict(parts[0])
	def encode(self):
		'''
		Encode a GNTP Message
		@return: GNTP Message ready to be sent
		'''
		self.validate()
		SEP = u': '
		EOL = u'\r\n'
		message = self.format_info() + EOL
		#Headers
		for k,v in self.headers.iteritems():
			message += k.encode('utf8') + SEP + str(v).encode('utf8') + EOL
		message += EOL
		return message
class GNTPRegister(_GNTPBase):
    '''
    GNTP Registration Message

    Registers an application and its notification types with a GNTP
    server.  Carries a list of notification definition blocks and any
    binary resource blocks keyed by their Identifier.
    '''
    def __init__(self,data=None,password=None):
        '''
        @param data: (Optional) See decode()
        @param password: (Optional) Password to use while encoding/decoding messages
        '''
        _GNTPBase.__init__(self,'REGISTER')
        # Notification definitions: list of header dicts, one per notification
        self.notifications = []
        # Binary resource blocks parsed from the message, keyed by Identifier
        self.resources = {}
        self.requiredHeaders = [
            'Application-Name',
            'Notifications-Count'
        ]
        # Headers every notification block must carry to be valid
        self.requiredNotification = [
            'Notification-Name',
        ]
        if data:
            # Parse an existing wire message
            self.decode(data,password)
        else:
            # Build a fresh message with placeholder defaults
            self.set_password(password)
            self.headers['Application-Name'] = 'pygntp'
            self.headers['Notifications-Count'] = 0
            self.add_origin_info()
    def validate(self):
        '''
        Validate required headers and validate notification headers

        @raise ParseError: if a required top-level or notification header
            is missing or empty
        '''
        for header in self.requiredHeaders:
            if not self.headers.get(header,False):
                raise ParseError('Missing Registration Header: '+header)
        for notice in self.notifications:
            for header in self.requiredNotification:
                if not notice.get(header,False):
                    raise ParseError('Missing Notification Header: '+header)
    def decode(self,data,password):
        '''
        Decode existing GNTP Registration message
        @param data: Message to decode.
        @param password: Password used to validate the message hash
        '''
        self.raw = data
        # Sections are separated by blank lines (CRLF CRLF); section 0 is
        # the header block, the rest are notification/resource blocks.
        parts = self.raw.split('\r\n\r\n')
        self.info = self.parse_info(data)
        self.validate_password(password)
        self.headers = self.parse_dict(parts[0])
        for i,part in enumerate(parts):
            if i==0: continue #Skip Header
            if part.strip()=='': continue
            notice = self.parse_dict(part)
            if notice.get('Notification-Name',False):
                # A notification definition block
                self.notifications.append(notice)
            elif notice.get('Identifier',False):
                # A binary resource block: decode its payload
                notice['Data'] = self._decode_binary(part,notice)
                #open('register.png','wblol').write(notice['Data'])
                self.resources[ notice.get('Identifier') ] = notice
    def add_notification(self,name,enabled=True):
        '''
        Add new Notification to Registration message
        @param name: Notification Name
        @param enabled: Default Notification to Enabled
        '''
        notice = {}
        notice['Notification-Name'] = name
        notice['Notification-Enabled'] = str(enabled)
        self.notifications.append(notice)
        # Keep the advertised count in sync with the actual list
        self.headers['Notifications-Count'] = len(self.notifications)
    def encode(self):
        '''
        Encode a GNTP Registration Message
        @return: GNTP Registration Message ready to be sent

        NOTE: iteritems()/str()+encode() are Python-2 specific idioms.
        '''
        self.validate()
        SEP = u': '
        EOL = u'\r\n'
        message = self.format_info() + EOL
        #Headers
        for k,v in self.headers.iteritems():
            message += k.encode('utf8') + SEP + str(v).encode('utf8') + EOL
        #Notifications
        # Each notification definition is emitted as its own blank-line
        # separated block after the headers.
        if len(self.notifications)>0:
            for notice in self.notifications:
                message += EOL
                for k,v in notice.iteritems():
                    message += k.encode('utf8') + SEP + str(v).encode('utf8') + EOL
        message += EOL
        return message
class GNTPNotice(_GNTPBase):
    '''
    GNTP Notification Message
    '''
    def __init__(self,data=None,app=None,name=None,title=None,password=None):
        '''
        @param data: (Optional) See decode()
        @param app: (Optional) Set Application-Name
        @param name: (Optional) Set Notification-Name
        @param title: (Optional) Set Notification Title
        @param password: (Optional) Password to use while encoding/decoding messages
        '''
        _GNTPBase.__init__(self,'NOTIFY')
        # Binary resource blocks parsed from the message, keyed by Identifier
        self.resources = {}
        self.requiredHeaders = [
            'Application-Name',
            'Notification-Name',
            'Notification-Title'
        ]
        if data:
            # Parse an existing wire message
            self.decode(data,password)
        else:
            # Build a fresh message from the individual fields
            self.set_password(password)
            if app:
                self.headers['Application-Name'] = app
            if name:
                self.headers['Notification-Name'] = name
            if title:
                self.headers['Notification-Title'] = title
            self.add_origin_info()
    def decode(self,data,password):
        '''
        Decode existing GNTP Notification message
        @param data: Message to decode.
        @param password: Password used to validate the message hash
        '''
        self.raw = data
        # Sections are separated by blank lines (CRLF CRLF); section 0 is
        # the header block, the rest are resource blocks.
        parts = self.raw.split('\r\n\r\n')
        self.info = self.parse_info(data)
        self.validate_password(password)
        self.headers = self.parse_dict(parts[0])
        for i,part in enumerate(parts):
            if i==0: continue #Skip Header
            if part.strip()=='': continue
            notice = self.parse_dict(part)
            if notice.get('Identifier',False):
                # A binary resource block: decode its payload
                notice['Data'] = self._decode_binary(part,notice)
                #open('notice.png','wblol').write(notice['Data'])
                self.resources[ notice.get('Identifier') ] = notice
    def encode(self):
        '''
        Encode a GNTP Notification Message
        @return: GNTP Notification Message ready to be sent (UTF-8 encoded)

        NOTE: iteritems()/unicode() are Python-2 specific idioms.
        '''
        self.validate()
        SEP = u': '
        EOL = u'\r\n'
        message = self.format_info() + EOL
        #Headers
        for k,v in self.headers.iteritems():
            message += k + SEP + unicode(v) + EOL
        message += EOL
        # Unlike _GNTPBase.encode, the assembled text is UTF-8 encoded here.
        return message.encode('utf-8')
class GNTPSubscribe(_GNTPBase):
    '''GNTP SUBSCRIBE message.'''
    def __init__(self, data=None, password=None):
        '''
        @param data: (Optional) Existing message to decode
        @param password: (Optional) Password to use while encoding/decoding messages
        '''
        _GNTPBase.__init__(self, 'SUBSCRIBE')
        self.requiredHeaders = ['Subscriber-ID', 'Subscriber-Name']
        if data:
            self.decode(data, password)
        else:
            self.set_password(password)
            self.add_origin_info()
class GNTPOK(_GNTPBase):
    '''GNTP "-OK" success response.'''
    def __init__(self, data=None, action=None):
        '''
        @param data: (Optional) See _GNTPResponse.decode()
        @param action: (Optional) Set type of action the OK Response is for
        '''
        _GNTPBase.__init__(self, '-OK')
        self.requiredHeaders = ['Response-Action']
        if data:
            self.decode(data)
        if action:
            self.headers['Response-Action'] = action
        self.add_origin_info()
class GNTPError(_GNTPBase):
    '''GNTP "-ERROR" failure response.'''
    def __init__(self, data=None, errorcode=None, errordesc=None):
        '''
        @param data: (Optional) See _GNTPResponse.decode()
        @param errorcode: (Optional) Error code
        @param errordesc: (Optional) Error Description
        '''
        _GNTPBase.__init__(self, '-ERROR')
        self.requiredHeaders = ['Error-Code', 'Error-Description']
        if data:
            self.decode(data)
        # BUG FIX: previously Error-Description was only stored when
        # errorcode was truthy, so a description passed on its own was
        # silently dropped, and an error code of 0 was treated as absent.
        # Store each field independently when explicitly provided.
        if errorcode is not None:
            self.headers['Error-Code'] = errorcode
        if errordesc is not None:
            self.headers['Error-Description'] = errordesc
        self.add_origin_info()
def parse_gntp(data,password=None,debug=False):
'''
Attempt to parse a message as a GNTP message
@param data: Message to be parsed
@param password: Optional password to be used to verify the message
@param debug: Print out extra debugging information
'''
match = re.match('GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)',data,re.IGNORECASE)
if not match:
if debug:
print '----'
print self.data
print '----'
raise ParseError('INVALID_GNTP_INFO')
info = match.groupdict()
if info['messagetype'] == 'REGISTER':
return GNTPRegister(data,password=password)
elif info['messagetype'] == 'NOTIFY':
return GNTPNotice(data,password=password)
elif info['messagetype'] == 'SUBSCRIBE':
return GNTPSubscribe(data,password=password)
elif info['messagetype'] == '-OK':
return GNTPOK(data)
elif info['messagetype'] == '-ERROR':
return GNTPError(data)
if debug: print info
raise ParseError('INVALID_GNTP_MESSAGE')
| gpl-3.0 |
afunTW/lbps | src/bearer.py | 1 | 1735 | import sys
import random
import logging
sys.path.append('..')
from src.traffic import Traffic
from lbps.structure import device
class Bearer(object):
    """A logical link between two lbps devices carrying a traffic flow.

    Wraps a source/destination device pair together with a CQI
    (Channel Quality Indicator, an int in 1..15) and an attached
    Traffic flow.
    """
    def __init__(self, src, dest, CQI=None, flow=None):
        # NOTE(review): `or` accepts the pair as soon as ONE endpoint is a
        # OneHopDevice, while the message implies both should be -- confirm
        # whether `and` was intended.
        assert isinstance(src, device.OneHopDevice) or\
        isinstance(dest, device.OneHopDevice),\
        'connection device is not the lbps.structure.device instances'
        self.__source = src
        self.__destination = dest
        self.__flow = None
        self.__CQI = None
        # CQI buckets: Low / Medium / High channel quality
        self.__CQI_type = {
            'L': [1,2,3,4,5,6],
            'M': [7,8,9],
            'H': [10,11,12,13,14,15]
        }
        # Route through the property setters for validation/randomisation.
        # NOTE(review): when flow is None the flow setter's assert fails and
        # logging.exception is emitted on every such construction -- confirm
        # this noise is intended.
        self.CQI = CQI
        self.flow = flow
    @property
    def source(self):
        # Source endpoint of this bearer (read-only)
        return self.__source
    @property
    def destination(self):
        # Destination endpoint of this bearer (read-only)
        return self.__destination
    @property
    def CQI(self):
        # Current CQI value (int in 1..15), or None if never validly set
        return self.__CQI
    @CQI.setter
    def CQI(self, value):
        # Accepts three forms; anything else is silently ignored:
        #  * a bucket name ('L'/'M'/'H') -> random CQI drawn from that bucket
        #  * a list of bucket names      -> random CQI drawn from their union
        #  * an int in 1..15             -> used as-is
        if isinstance(value, str) and value in self.__CQI_type.keys():
            self.__CQI = random.choice(self.__CQI_type[value])
        elif isinstance(value, list) and \
        set(value).issubset(list(self.__CQI_type.keys())):
            cqi_range = [self.__CQI_type[t] for t in value]
            # flatten the per-bucket lists into one candidate pool
            cqi_range = [_ for item in cqi_range for _ in item]
            self.__CQI = random.choice(cqi_range)
        elif isinstance(value, int) and value > 0 and value < 16:
            self.__CQI = value
    @property
    def flow(self):
        # Attached Traffic flow, or None
        return self.__flow
    @flow.setter
    def flow(self, flow_type):
        try:
            assert isinstance(flow_type, Traffic), 'not the Traffic instance'
            self.__flow = flow_type
        except Exception as e:
            # Invalid flows are logged and ignored (bearer keeps flow=None)
            logging.exception(e)
| mit |
xionzz/earthquake | venv/lib/python2.7/site-packages/numpy/doc/subclassing.py | 139 | 20225 | """
=============================
Subclassing ndarray in python
=============================
Credits
-------
This page is based with thanks on the wiki page on subclassing by Pierre
Gerard-Marchant - http://www.scipy.org/Subclasses.
Introduction
------------
Subclassing ndarray is relatively simple, but it has some complications
compared to other Python objects. On this page we explain the machinery
that allows you to subclass ndarray, and the implications for
implementing a subclass.
ndarrays and object creation
============================
Subclassing ndarray is complicated by the fact that new instances of
ndarray classes can come about in three different ways. These are:
#. Explicit constructor call - as in ``MySubClass(params)``. This is
the usual route to Python instance creation.
#. View casting - casting an existing ndarray as a given subclass
#. New from template - creating a new instance from a template
instance. Examples include returning slices from a subclassed array,
creating return types from ufuncs, and copying arrays. See
:ref:`new-from-template` for more details
The last two are characteristics of ndarrays - in order to support
things like array slicing. The complications of subclassing ndarray are
due to the mechanisms numpy has to support these latter two routes of
instance creation.
.. _view-casting:
View casting
------------
*View casting* is the standard ndarray mechanism by which you take an
ndarray of any subclass, and return a view of the array as another
(specified) subclass:
>>> import numpy as np
>>> # create a completely useless ndarray subclass
>>> class C(np.ndarray): pass
>>> # create a standard ndarray
>>> arr = np.zeros((3,))
>>> # take a view of it, as our useless subclass
>>> c_arr = arr.view(C)
>>> type(c_arr)
<class 'C'>
.. _new-from-template:
Creating new from template
--------------------------
New instances of an ndarray subclass can also come about by a very
similar mechanism to :ref:`view-casting`, when numpy finds it needs to
create a new instance from a template instance. The most obvious place
this has to happen is when you are taking slices of subclassed arrays.
For example:
>>> v = c_arr[1:]
>>> type(v) # the view is of type 'C'
<class 'C'>
>>> v is c_arr # but it's a new instance
False
The slice is a *view* onto the original ``c_arr`` data. So, when we
take a view from the ndarray, we return a new ndarray, of the same
class, that points to the data in the original.
There are other points in the use of ndarrays where we need such views,
such as copying arrays (``c_arr.copy()``), creating ufunc output arrays
(see also :ref:`array-wrap`), and reducing methods (like
``c_arr.mean()``.
Relationship of view casting and new-from-template
--------------------------------------------------
These paths both use the same machinery. We make the distinction here,
because they result in different input to your methods. Specifically,
:ref:`view-casting` means you have created a new instance of your array
type from any potential subclass of ndarray. :ref:`new-from-template`
means you have created a new instance of your class from a pre-existing
instance, allowing you - for example - to copy across attributes that
are particular to your subclass.
Implications for subclassing
----------------------------
If we subclass ndarray, we need to deal not only with explicit
construction of our array type, but also :ref:`view-casting` or
:ref:`new-from-template`. Numpy has the machinery to do this, and this
machinery that makes subclassing slightly non-standard.
There are two aspects to the machinery that ndarray uses to support
views and new-from-template in subclasses.
The first is the use of the ``ndarray.__new__`` method for the main work
of object initialization, rather than the more usual ``__init__``
method. The second is the use of the ``__array_finalize__`` method to
allow subclasses to clean up after the creation of views and new
instances from templates.
A brief Python primer on ``__new__`` and ``__init__``
=====================================================
``__new__`` is a standard Python method, and, if present, is called
before ``__init__`` when we create a class instance. See the `python
__new__ documentation
<http://docs.python.org/reference/datamodel.html#object.__new__>`_ for more detail.
For example, consider the following Python code:
.. testcode::
class C(object):
def __new__(cls, *args):
print 'Cls in __new__:', cls
print 'Args in __new__:', args
return object.__new__(cls, *args)
def __init__(self, *args):
print 'type(self) in __init__:', type(self)
print 'Args in __init__:', args
meaning that we get:
>>> c = C('hello')
Cls in __new__: <class 'C'>
Args in __new__: ('hello',)
type(self) in __init__: <class 'C'>
Args in __init__: ('hello',)
When we call ``C('hello')``, the ``__new__`` method gets its own class
as first argument, and the passed argument, which is the string
``'hello'``. After python calls ``__new__``, it usually (see below)
calls our ``__init__`` method, with the output of ``__new__`` as the
first argument (now a class instance), and the passed arguments
following.
As you can see, the object can be initialized in the ``__new__``
method or the ``__init__`` method, or both, and in fact ndarray does
not have an ``__init__`` method, because all the initialization is
done in the ``__new__`` method.
Why use ``__new__`` rather than just the usual ``__init__``? Because
in some cases, as for ndarray, we want to be able to return an object
of some other class. Consider the following:
.. testcode::
class D(C):
def __new__(cls, *args):
print 'D cls is:', cls
print 'D args in __new__:', args
return C.__new__(C, *args)
def __init__(self, *args):
# we never get here
print 'In D __init__'
meaning that:
>>> obj = D('hello')
D cls is: <class 'D'>
D args in __new__: ('hello',)
Cls in __new__: <class 'C'>
Args in __new__: ('hello',)
>>> type(obj)
<class 'C'>
The definition of ``C`` is the same as before, but for ``D``, the
``__new__`` method returns an instance of class ``C`` rather than
``D``. Note that the ``__init__`` method of ``D`` does not get
called. In general, when the ``__new__`` method returns an object of
class other than the class in which it is defined, the ``__init__``
method of that class is not called.
This is how subclasses of the ndarray class are able to return views
that preserve the class type. When taking a view, the standard
ndarray machinery creates the new ndarray object with something
like::
obj = ndarray.__new__(subtype, shape, ...
where ``subtype`` is the subclass. Thus the returned view is of the
same class as the subclass, rather than being of class ``ndarray``.
That solves the problem of returning views of the same type, but now
we have a new problem. The machinery of ndarray can set the class
this way, in its standard methods for taking views, but the ndarray
``__new__`` method knows nothing of what we have done in our own
``__new__`` method in order to set attributes, and so on. (Aside -
why not call ``obj = subtype.__new__(...`` then? Because we may not
have a ``__new__`` method with the same call signature).
The role of ``__array_finalize__``
==================================
``__array_finalize__`` is the mechanism that numpy provides to allow
subclasses to handle the various ways that new instances get created.
Remember that subclass instances can come about in these three ways:
#. explicit constructor call (``obj = MySubClass(params)``). This will
call the usual sequence of ``MySubClass.__new__`` then (if it exists)
``MySubClass.__init__``.
#. :ref:`view-casting`
#. :ref:`new-from-template`
Our ``MySubClass.__new__`` method only gets called in the case of the
explicit constructor call, so we can't rely on ``MySubClass.__new__`` or
``MySubClass.__init__`` to deal with the view casting and
new-from-template. It turns out that ``MySubClass.__array_finalize__``
*does* get called for all three methods of object creation, so this is
where our object creation housekeeping usually goes.
* For the explicit constructor call, our subclass will need to create a
new ndarray instance of its own class. In practice this means that
we, the authors of the code, will need to make a call to
``ndarray.__new__(MySubClass,...)``, or do view casting of an existing
array (see below)
* For view casting and new-from-template, the equivalent of
``ndarray.__new__(MySubClass,...`` is called, at the C level.
The arguments that ``__array_finalize__`` receives differ for the three
methods of instance creation above.
The following code allows us to look at the call sequences and arguments:
.. testcode::
import numpy as np
class C(np.ndarray):
def __new__(cls, *args, **kwargs):
print 'In __new__ with class %s' % cls
return np.ndarray.__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
# in practice you probably will not need or want an __init__
# method for your subclass
print 'In __init__ with class %s' % self.__class__
def __array_finalize__(self, obj):
print 'In array_finalize:'
print ' self type is %s' % type(self)
print ' obj type is %s' % type(obj)
Now:
>>> # Explicit constructor
>>> c = C((10,))
In __new__ with class <class 'C'>
In array_finalize:
self type is <class 'C'>
obj type is <type 'NoneType'>
In __init__ with class <class 'C'>
>>> # View casting
>>> a = np.arange(10)
>>> cast_a = a.view(C)
In array_finalize:
self type is <class 'C'>
obj type is <type 'numpy.ndarray'>
>>> # Slicing (example of new-from-template)
>>> cv = c[:1]
In array_finalize:
self type is <class 'C'>
obj type is <class 'C'>
The signature of ``__array_finalize__`` is::
def __array_finalize__(self, obj):
``ndarray.__new__`` passes ``__array_finalize__`` the new object, of our
own class (``self``) as well as the object from which the view has been
taken (``obj``). As you can see from the output above, the ``self`` is
always a newly created instance of our subclass, and the type of ``obj``
differs for the three instance creation methods:
* When called from the explicit constructor, ``obj`` is ``None``
* When called from view casting, ``obj`` can be an instance of any
subclass of ndarray, including our own.
* When called in new-from-template, ``obj`` is another instance of our
own subclass, that we might use to update the new ``self`` instance.
Because ``__array_finalize__`` is the only method that always sees new
instances being created, it is the sensible place to fill in instance
defaults for new object attributes, among other tasks.
This may be clearer with an example.
Simple example - adding an extra attribute to ndarray
-----------------------------------------------------
.. testcode::
import numpy as np
class InfoArray(np.ndarray):
def __new__(subtype, shape, dtype=float, buffer=None, offset=0,
strides=None, order=None, info=None):
# Create the ndarray instance of our type, given the usual
# ndarray input arguments. This will call the standard
# ndarray constructor, but return an object of our type.
# It also triggers a call to InfoArray.__array_finalize__
obj = np.ndarray.__new__(subtype, shape, dtype, buffer, offset, strides,
order)
# set the new 'info' attribute to the value passed
obj.info = info
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
# ``self`` is a new object resulting from
# ndarray.__new__(InfoArray, ...), therefore it only has
# attributes that the ndarray.__new__ constructor gave it -
# i.e. those of a standard ndarray.
#
# We could have got to the ndarray.__new__ call in 3 ways:
# From an explicit constructor - e.g. InfoArray():
# obj is None
# (we're in the middle of the InfoArray.__new__
# constructor, and self.info will be set when we return to
# InfoArray.__new__)
if obj is None: return
# From view casting - e.g arr.view(InfoArray):
# obj is arr
# (type(obj) can be InfoArray)
# From new-from-template - e.g infoarr[:3]
# type(obj) is InfoArray
#
# Note that it is here, rather than in the __new__ method,
# that we set the default value for 'info', because this
# method sees all creation of default objects - with the
# InfoArray.__new__ constructor, but also with
# arr.view(InfoArray).
self.info = getattr(obj, 'info', None)
# We do not need to return anything
Using the object looks like this:
>>> obj = InfoArray(shape=(3,)) # explicit constructor
>>> type(obj)
<class 'InfoArray'>
>>> obj.info is None
True
>>> obj = InfoArray(shape=(3,), info='information')
>>> obj.info
'information'
>>> v = obj[1:] # new-from-template - here - slicing
>>> type(v)
<class 'InfoArray'>
>>> v.info
'information'
>>> arr = np.arange(10)
>>> cast_arr = arr.view(InfoArray) # view casting
>>> type(cast_arr)
<class 'InfoArray'>
>>> cast_arr.info is None
True
This class isn't very useful, because it has the same constructor as the
bare ndarray object, including passing in buffers and shapes and so on.
We would probably prefer the constructor to be able to take an already
formed ndarray from the usual numpy calls to ``np.array`` and return an
object.
Slightly more realistic example - attribute added to existing array
-------------------------------------------------------------------
Here is a class that takes a standard ndarray that already exists, casts
as our type, and adds an extra attribute.
.. testcode::
import numpy as np
class RealisticInfoArray(np.ndarray):
def __new__(cls, input_array, info=None):
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array).view(cls)
# add the new attribute to the created instance
obj.info = info
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
# see InfoArray.__array_finalize__ for comments
if obj is None: return
self.info = getattr(obj, 'info', None)
So:
>>> arr = np.arange(5)
>>> obj = RealisticInfoArray(arr, info='information')
>>> type(obj)
<class 'RealisticInfoArray'>
>>> obj.info
'information'
>>> v = obj[1:]
>>> type(v)
<class 'RealisticInfoArray'>
>>> v.info
'information'
.. _array-wrap:
``__array_wrap__`` for ufuncs
-------------------------------------------------------
``__array_wrap__`` gets called at the end of numpy ufuncs and other numpy
functions, to allow a subclass to set the type of the return value
and update attributes and metadata. Let's show how this works with an example.
First we make the same subclass as above, but with a different name and
some print statements:
.. testcode::
import numpy as np
class MySubClass(np.ndarray):
def __new__(cls, input_array, info=None):
obj = np.asarray(input_array).view(cls)
obj.info = info
return obj
def __array_finalize__(self, obj):
print 'In __array_finalize__:'
print ' self is %s' % repr(self)
print ' obj is %s' % repr(obj)
if obj is None: return
self.info = getattr(obj, 'info', None)
def __array_wrap__(self, out_arr, context=None):
print 'In __array_wrap__:'
print ' self is %s' % repr(self)
print ' arr is %s' % repr(out_arr)
# then just call the parent
return np.ndarray.__array_wrap__(self, out_arr, context)
We run a ufunc on an instance of our new array:
>>> obj = MySubClass(np.arange(5), info='spam')
In __array_finalize__:
self is MySubClass([0, 1, 2, 3, 4])
obj is array([0, 1, 2, 3, 4])
>>> arr2 = np.arange(5)+1
>>> ret = np.add(arr2, obj)
In __array_wrap__:
self is MySubClass([0, 1, 2, 3, 4])
arr is array([1, 3, 5, 7, 9])
In __array_finalize__:
self is MySubClass([1, 3, 5, 7, 9])
obj is MySubClass([0, 1, 2, 3, 4])
>>> ret
MySubClass([1, 3, 5, 7, 9])
>>> ret.info
'spam'
Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method of the
input with the highest ``__array_priority__`` value, in this case
``MySubClass.__array_wrap__``, with arguments ``self`` as ``obj``, and
``out_arr`` as the (ndarray) result of the addition. In turn, the
default ``__array_wrap__`` (``ndarray.__array_wrap__``) has cast the
result to class ``MySubClass``, and called ``__array_finalize__`` -
hence the copying of the ``info`` attribute. This has all happened at the C level.
But, we could do anything we wanted:
.. testcode::
class SillySubClass(np.ndarray):
def __array_wrap__(self, arr, context=None):
return 'I lost your data'
>>> arr1 = np.arange(5)
>>> obj = arr1.view(SillySubClass)
>>> arr2 = np.arange(5)
>>> ret = np.multiply(obj, arr2)
>>> ret
'I lost your data'
So, by defining a specific ``__array_wrap__`` method for our subclass,
we can tweak the output from ufuncs. The ``__array_wrap__`` method
requires ``self``, then an argument - which is the result of the ufunc -
and an optional parameter *context*. This parameter is returned by some
ufuncs as a 3-element tuple: (name of the ufunc, argument of the ufunc,
domain of the ufunc). ``__array_wrap__`` should return an instance of
its containing class. See the masked array subclass for an
implementation.
In addition to ``__array_wrap__``, which is called on the way out of the
ufunc, there is also an ``__array_prepare__`` method which is called on
the way into the ufunc, after the output arrays are created but before any
computation has been performed. The default implementation does nothing
but pass through the array. ``__array_prepare__`` should not attempt to
access the array data or resize the array, it is intended for setting the
output array type, updating attributes and metadata, and performing any
checks based on the input that may be desired before computation begins.
Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or
subclass thereof or raise an error.
Extra gotchas - custom ``__del__`` methods and ndarray.base
-----------------------------------------------------------
One of the problems that ndarray solves is keeping track of memory
ownership of ndarrays and their views. Consider the case where we have
created an ndarray, ``arr`` and have taken a slice with ``v = arr[1:]``.
The two objects are looking at the same memory. Numpy keeps track of
where the data came from for a particular array or view, with the
``base`` attribute:
>>> # A normal ndarray, that owns its own data
>>> arr = np.zeros((4,))
>>> # In this case, base is None
>>> arr.base is None
True
>>> # We take a view
>>> v1 = arr[1:]
>>> # base now points to the array that it derived from
>>> v1.base is arr
True
>>> # Take a view of a view
>>> v2 = v1[1:]
>>> # base points to the view it derived from
>>> v2.base is v1
True
In general, if the array owns its own memory, as for ``arr`` in this
case, then ``arr.base`` will be None - there are some exceptions to this
- see the numpy book for more details.
The ``base`` attribute is useful in being able to tell whether we have
a view or the original array. This in turn can be useful if we need
to know whether or not to do some specific cleanup when the subclassed
array is deleted. For example, we may only want to do the cleanup if
the original array is deleted, but not the views. For an example of
how this can work, have a look at the ``memmap`` class in
``numpy.core``.
"""
from __future__ import division, absolute_import, print_function
| mit |
Konubinix/weboob | modules/opacwebaloes/browser.py | 7 | 2761 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2012 Jeremy Monnet
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.deprecated.browser import Browser, BrowserIncorrectPassword
from .pages import LoginPage, HomePage, RentedPage, HistoryPage, BookedPage
__all__ = ['AloesBrowser']
# Browser
class AloesBrowser(Browser):
    """Browser for an Aloes OPAC (library catalogue) web interface.

    Handles login and navigation to the rented/booked book listing pages.
    """
    PROTOCOL = 'http'
    ENCODING = 'utf-8'
    USER_AGENT = Browser.USER_AGENTS['desktop_firefox']
    #DEBUG_HTTP = True
    DEBUG_HTTP = False
    # URL-regexp -> Page-class routing table; the IdPage query parameter
    # selects which OPAC view is shown.
    PAGES = {
        'http://.*/index.aspx': LoginPage,
        'http://.*/index.aspx\?IdPage=1': HomePage,
        'http://.*/index.aspx\?IdPage=45': RentedPage,
        'http://.*/index.aspx\?IdPage=429': HistoryPage,
        'http://.*/index.aspx\?IdPage=44': BookedPage
    }
    def __init__(self, baseurl, *args, **kwargs):
        # The Aloes instance hostname is configurable per backend
        self.BASEURL = baseurl
        Browser.__init__(self, *args, **kwargs)
    def is_logged(self):
        # Considered logged in when the login text-input is absent from the
        # current page (and a page has been loaded at all).
        return self.page \
            and not self.page.document.getroot().xpath('//input[contains(@id, "ctl00_ContentPlaceHolder1_ctl00_ctl08_ctl00_TextSaisie")]')
        #return True
    def login(self):
        """Log in with the stored credentials.

        Loads the login page first if we are not already on the home page.

        @raise BrowserIncorrectPassword: if the site rejects the credentials
        """
        assert isinstance(self.username, basestring)
        assert isinstance(self.password, basestring)
        if not self.is_on_page(HomePage):
            self.location('%s://%s/index.aspx'
                          % (self.PROTOCOL, self.BASEURL),
                          no_login=True)
        if not self.page.login(self.username, self.password) or \
           not self.is_logged() or \
           (self.is_on_page(LoginPage) and self.page.is_error()):
            raise BrowserIncorrectPassword()
    def get_rented_books_list(self):
        # Navigate to the "rented" view (IdPage=45) if needed, then scrape it
        if not self.is_on_page(RentedPage):
            self.location('%s://%s/index.aspx?IdPage=45'
                          % (self.PROTOCOL, self.BASEURL)
                          )
        return self.page.get_list()
    def get_booked_books_list(self):
        # Navigate to the "booked" view (IdPage=44) if needed, then scrape it
        if not self.is_on_page(BookedPage):
            self.location('%s://%s/index.aspx?IdPage=44'
                          % (self.PROTOCOL, self.BASEURL))
        return self.page.get_list()
| agpl-3.0 |
sander76/home-assistant | homeassistant/components/knx/cover.py | 3 | 6119 | """Support for KNX/IP covers."""
from __future__ import annotations
from datetime import datetime
from typing import Any, Callable, Iterable
from xknx.devices import Cover as XknxCover, Device as XknxDevice
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
DEVICE_CLASS_BLIND,
DEVICE_CLASSES,
SUPPORT_CLOSE,
SUPPORT_CLOSE_TILT,
SUPPORT_OPEN,
SUPPORT_OPEN_TILT,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
SUPPORT_STOP,
SUPPORT_STOP_TILT,
CoverEntity,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_utc_time_change
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from .const import DOMAIN
from .knx_entity import KnxEntity
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: Callable[[Iterable[Entity]], None],
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up cover(s) for KNX platform."""
    async_add_entities(
        [
            KNXCover(device)
            for device in hass.data[DOMAIN].xknx.devices
            if isinstance(device, XknxCover)
        ]
    )
class KNXCover(KnxEntity, CoverEntity):
"""Representation of a KNX cover."""
def __init__(self, device: XknxCover):
    """Initialize the cover."""
    # Type-only annotation (no assignment): narrows self._device for type
    # checkers; the attribute itself is set by the base-class initializer.
    self._device: XknxCover
    super().__init__(device)
    # Cancel-callback for the periodic updater; None while it is not running
    self._unsubscribe_auto_updater: Callable[[], None] | None = None
@callback
async def after_update_callback(self, device: XknxDevice) -> None:
    """Call after device was updated."""
    self.async_write_ha_state()
    # While the cover is moving, start periodic refreshes so the UI
    # reflects the travel progress.
    if self._device.is_traveling():
        self.start_auto_updater()
@property
def device_class(self) -> str | None:
    """Return the class of this device, from component DEVICE_CLASSES."""
    configured = self._device.device_class
    if configured in DEVICE_CLASSES:
        return configured
    # Fall back to "blind" for covers that support tilt, else unknown.
    return DEVICE_CLASS_BLIND if self._device.supports_angle else None
@property
def supported_features(self) -> int:
    """Flag supported features."""
    features = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
    if self._device.supports_stop:
        features |= SUPPORT_STOP
    if self._device.supports_angle:
        # Tilt control is only advertised when the device supports angles.
        tilt_features = (
            SUPPORT_SET_TILT_POSITION
            | SUPPORT_OPEN_TILT
            | SUPPORT_CLOSE_TILT
            | SUPPORT_STOP_TILT
        )
        features |= tilt_features
    return features
@property
def current_cover_position(self) -> int | None:
    """Return the current position of the cover.

    None is unknown, 0 is closed, 100 is fully open.
    """
    # KNX and HA use opposite scales: in KNX 0 is open and 100 is closed.
    knx_position = self._device.current_position()
    if knx_position is None:
        return None
    return 100 - knx_position
@property
def is_closed(self) -> bool | None:
    """Return if the cover is closed."""
    # Report "unknown" (None) until the xknx travelcalculator has a position.
    position_known = self._device.current_position() is not None
    return self._device.is_closed() if position_known else None
@property
def is_opening(self) -> bool:
"""Return if the cover is opening or not."""
return self._device.is_opening()
@property
def is_closing(self) -> bool:
"""Return if the cover is closing or not."""
return self._device.is_closing()
async def async_close_cover(self, **kwargs: Any) -> None:
"""Close the cover."""
await self._device.set_down()
async def async_open_cover(self, **kwargs: Any) -> None:
"""Open the cover."""
await self._device.set_up()
async def async_set_cover_position(self, **kwargs: Any) -> None:
"""Move the cover to a specific position."""
knx_position = 100 - kwargs[ATTR_POSITION]
await self._device.set_position(knx_position)
async def async_stop_cover(self, **kwargs: Any) -> None:
"""Stop the cover."""
await self._device.stop()
self.stop_auto_updater()
@property
def current_cover_tilt_position(self) -> int | None:
"""Return current tilt position of cover."""
if not self._device.supports_angle:
return None
ang = self._device.current_angle()
return 100 - ang if ang is not None else None
async def async_set_cover_tilt_position(self, **kwargs: Any) -> None:
"""Move the cover tilt to a specific position."""
knx_tilt_position = 100 - kwargs[ATTR_TILT_POSITION]
await self._device.set_angle(knx_tilt_position)
async def async_open_cover_tilt(self, **kwargs: Any) -> None:
"""Open the cover tilt."""
await self._device.set_short_up()
async def async_close_cover_tilt(self, **kwargs: Any) -> None:
"""Close the cover tilt."""
await self._device.set_short_down()
async def async_stop_cover_tilt(self, **kwargs: Any) -> None:
"""Stop the cover tilt."""
await self._device.stop()
self.stop_auto_updater()
def start_auto_updater(self) -> None:
"""Start the autoupdater to update Home Assistant while cover is moving."""
if self._unsubscribe_auto_updater is None:
self._unsubscribe_auto_updater = async_track_utc_time_change(
self.hass, self.auto_updater_hook
)
def stop_auto_updater(self) -> None:
"""Stop the autoupdater."""
if self._unsubscribe_auto_updater is not None:
self._unsubscribe_auto_updater()
self._unsubscribe_auto_updater = None
@callback
def auto_updater_hook(self, now: datetime) -> None:
"""Call for the autoupdater."""
self.async_write_ha_state()
if self._device.position_reached():
self.hass.async_create_task(self._device.auto_stop_if_necessary())
self.stop_auto_updater()
| apache-2.0 |
tqchen/tvm | python/tvm/te/hybrid/util.py | 5 | 3878 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Internal utilities for parsing Python subset to TIR"""
import ast
import inspect
import logging
import sys
import numpy
import tvm.runtime
from tvm._ffi.base import numeric_types
from tvm.ir.container import Array
from tvm.tir import expr as _expr
from tvm.tir import stmt as _stmt
from tvm.te.tensor import Tensor
# pylint: disable=invalid-name
# Argument type groups used to dispatch between the numpy-simulation path and
# the TVM compilation path (see _is_tvm_arg_types below).
np_arg_types = tuple(list(numeric_types) + [numpy.ndarray])
tvm_arg_types = (Tensor, Array, _expr.Var, _expr.ConstExpr)
# Immediate (constant) expression node types.
halide_imm_types = (_expr.IntImm, _expr.FloatImm)
def _internal_assert(cond, err):
"""Simplify the code segment like if not XXX then raise an error"""
if not cond:
raise ValueError(err)
# Useful constants. In avoid of runtime dependences, we use function calls to return them.
def make_nop():
    """Returns a 'no operation' node in HalideIR."""
    zero = tvm.runtime.const(0, dtype="int32")
    return _stmt.Evaluate(zero)
def is_docstring(node):
    """Checks if a Python AST node is a docstring.

    ``ast.Str`` was deprecated in Python 3.8 and removed in Python 3.12, so
    string literals must be recognized as ``ast.Constant`` holding a ``str``.
    A fallback keeps pre-3.8 ASTs (where literals parse as ``ast.Str``) working.
    """
    if not isinstance(node, ast.Expr):
        return False
    value = node.value
    if isinstance(value, ast.Constant):
        return isinstance(value.value, str)
    # Python < 3.8 fallback; getattr yields () (never matches) when Str is gone.
    return isinstance(value, getattr(ast, "Str", ()))
def _pruned_source(func):
"""Prune source code's extra leading spaces"""
try:
lines = inspect.getsource(func).split("\n")
leading_space = len(lines[0]) - len(lines[0].lstrip(" "))
lines = [line[leading_space:] for line in lines]
return "\n".join(lines)
except IOError as err:
if sys.version_info[0] == 2 and str(err) == "could not get source code":
logging.log(
logging.CRITICAL,
"This module is not fully operated under Python2... " "Please move to Python3!",
)
raise err
def replace_io(body, rmap):
    """Replacing tensors usage according to the dict given"""
    # pylint: disable=import-outside-toplevel
    from tvm.tir import stmt_functor

    def _swap(op):
        """Rebuild a node against the mapped buffer when its producer is in rmap."""
        if isinstance(op, _stmt.ProducerStore):
            if op.producer.op in rmap:
                return _stmt.ProducerStore(rmap[op.producer.op], op.value, op.indices)
        elif isinstance(op, _expr.ProducerLoad):
            if op.producer.op in rmap:
                return _expr.ProducerLoad(rmap[op.producer.op], op.indices)
        return None

    return stmt_functor.ir_transform(body, None, _swap, ["tir.ProducerStore", "tir.ProducerLoad"])
def _is_tvm_arg_types(args):
    """Determine a list of element is either a list of tvm arguments of a list of numpy arguments.
    If neither is true, raise a value error."""
    is_tvm = isinstance(args[0], tvm_arg_types)
    if is_tvm:
        accepted = tvm_arg_types
        template = "Expecting a Var, Tensor or ConstExpr instance but %s get!"
    else:
        # The first element decides the family; it must then be a numpy type.
        _internal_assert(
            isinstance(args[0], np_arg_types),
            "Expect a numpy type but %s get!" % str(type(args[0])),
        )
        accepted = np_arg_types
        template = "Expect a numpy type but %s get!"
    # All remaining elements must belong to the same family as the first.
    for elem in args[1:]:
        _internal_assert(isinstance(elem, accepted), template % str(type(elem)))
    return is_tvm
| apache-2.0 |
fredojones/rpg | battle.py | 1 | 2967 |
import os
import sys
import random
import time
# Fight scene between player and enemy!
#
# If player wins, delete the enemy from the room.
# Calculate damage from given attacker and defender
def calculate_damage(attacker, defender):
    """Return randomized damage dealt by attacker to defender.

    The base is strength/2 + weapon damage - armor; the roll is drawn
    uniformly from [base, 2*base) and then reduced by the defender's
    defense stat.
    """
    armor = 0
    # Does defender have armor?
    if hasattr(defender, 'armor') and defender.armor is not None:
        armor = defender.armor.defense
    damage = attacker.strength/2 + attacker.weapon.damage - armor
    # BUG FIX: clamp the base to at least 1 -- random.randrange raises
    # ValueError on an empty range when armor cancels the whole attack.
    base = max(1, int(damage))
    return random.randrange(base, 2 * base) - defender.defense
# Calculate the damage if critical hit
def calculate_critical_damage(attacker, defender):
    """Return critical-hit damage: two base rolls summed, minus defense."""
    # BUG FIX: strength/2 is a float under Python 3 and random.randrange
    # rejects non-integer bounds (TypeError since 3.12); truncate to int the
    # same way calculate_damage does, and clamp so the range is never empty.
    base = max(1, int(attacker.strength/2 + attacker.weapon.damage))
    return (random.randrange(base, 2 * base) +
            random.randrange(base, 2 * base) -
            defender.defense)
def fight(player, enemy, room):
    """Run the interactive battle loop between player and enemy.

    On victory the enemy is removed from the room and the player gains XP;
    on player death the whole program exits.  Returns 0 when the player
    flees.  The player is restored to full health after the battle ends.
    """
    # Clear the screen
    if os.name == 'posix':
        os.system('clear')
    else:
        os.system('cls')
    # Enemy intro message
    print(enemy.generateintro() + "\n")
    # Battle loop
    while True:
        if player.health <= 0:
            print("You have died!")
            sys.exit()
        if enemy.health <= 0:
            print("You have won this battle!!!")
            print()
            print("You gained " + str(enemy.exp) + " xp!")
            player.add_exp(enemy.exp)
            room.enemies.remove(enemy)
            break
        print("You have " + str(player.health) + " health")
        print()
        print("1. Attack")
        print("2. Recover")
        print("3. Flee")
        command = input(">")
        option = 0
        # Check that we actually entered a number
        try:
            option = int(command)
        except ValueError:
            print("Please enter a number\n")
            continue
        # Check that we selected a menu option
        if option > 3 or option < 1:
            print("Please select a menu option\n")
            continue
        # Formulae from here:
        # http://www.gamefaqs.com/boards/522595-final-fantasy/41701255
        if option == 1:
            # Damage enemy
            damage = 0
            # Calculate whether critical hit
            # (higher dexterity => more likely to roll under it out of 150)
            if (random.randrange(0, 150) < player.dexterity):
                damage = calculate_critical_damage(player, enemy)
                print("\nCritical hit!\n")
            else:
                damage = calculate_damage(player, enemy)
            enemy.health -= damage;
            print("You hit the enemy for " + str(damage) + " with your " + player.weapon.name)
        if option == 2:
            # Calculate healing
            # Heal a random fraction between 1/6 and 1/4 of max health.
            healing = random.randrange(round(player.max_health/6), round(player.max_health/4))
            player.health += healing
            print("You healed for " + str(healing) + " health")
        if option == 3:
            # 60% chance to escape; on success the function returns early,
            # so the enemy only gets its counterattack on a failed escape.
            if random.random() > 0.4:
                # Flee
                print("You ran away!")
                return 0
            else:
                print("You failed to escape!")
                time.sleep(0.5)
        # Damage player
        # The enemy counterattacks every round that did not end the fight.
        damage = calculate_damage(enemy, player)
        player.health -= damage
        print("The enemy hit you for " + str(damage) + " with its " + enemy.weapon.name + "\n")
        time.sleep(0.5)
    # Full heal after a won battle.
    player.health = player.max_health
| mit |
madj4ck/ansible | lib/ansible/plugins/action/assert.py | 163 | 2353 | # Copyright 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.playbook.conditional import Conditional
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    ''' Fail with custom message '''

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        """Evaluate every conditional in the task's 'that' list.

        Returns a failed result dict (naming the first failing assertion,
        plus the optional custom 'msg') or a success dict when all
        conditionals evaluate truthy.  Raises AnsibleError when 'that' is
        missing from the task arguments.
        """
        # BUG FIX: the signature previously used a mutable default argument
        # (task_vars=dict()), which is created once at definition time and
        # shared across every call.  Default to None and build a fresh dict.
        if task_vars is None:
            task_vars = dict()

        if 'that' not in self._task.args:
            raise AnsibleError('conditional required in "that" string')

        msg = None
        if 'msg' in self._task.args:
            msg = self._task.args['msg']

        # make sure the 'that' items are a list
        thats = self._task.args['that']
        if not isinstance(thats, list):
            thats = [ thats ]

        # Now we iterate over the that items, temporarily assigning them
        # to the task's when value so we can evaluate the conditional using
        # the built in evaluate function. The when has already been evaluated
        # by this point, and is not used again, so we don't care about mangling
        # that value now
        cond = Conditional(loader=self._loader)
        for that in thats:
            cond.when = [ that ]
            test_result = cond.evaluate_conditional(templar=self._templar, all_vars=task_vars)
            if not test_result:
                result = dict(
                    failed       = True,
                    evaluated_to = test_result,
                    assertion    = that,
                )
                if msg:
                    result['msg'] = msg
                return result

        return dict(changed=False, msg='all assertions passed')
| gpl-3.0 |
pschmitt/home-assistant | homeassistant/components/mqtt/light/schema_template.py | 5 | 17155 | """Support for MQTT Template lights."""
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.components.mqtt import (
CONF_COMMAND_TOPIC,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
CONF_UNIQUE_ID,
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from homeassistant.const import (
CONF_DEVICE,
CONF_NAME,
CONF_OPTIMISTIC,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.util.color as color_util
from ..debug_info import log_messages
from .schema import MQTT_LIGHT_SCHEMA_SCHEMA
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mqtt_template"
DEFAULT_NAME = "MQTT Template Light"
DEFAULT_OPTIMISTIC = False
# Configuration keys specific to the template light schema; each *_template
# option is a Jinja template rendered against incoming/outgoing payloads.
CONF_BLUE_TEMPLATE = "blue_template"
CONF_BRIGHTNESS_TEMPLATE = "brightness_template"
CONF_COLOR_TEMP_TEMPLATE = "color_temp_template"
CONF_COMMAND_OFF_TEMPLATE = "command_off_template"
CONF_COMMAND_ON_TEMPLATE = "command_on_template"
CONF_EFFECT_LIST = "effect_list"
CONF_EFFECT_TEMPLATE = "effect_template"
CONF_GREEN_TEMPLATE = "green_template"
CONF_MAX_MIREDS = "max_mireds"
CONF_MIN_MIREDS = "min_mireds"
CONF_RED_TEMPLATE = "red_template"
CONF_STATE_TEMPLATE = "state_template"
CONF_WHITE_VALUE_TEMPLATE = "white_value_template"
# Validation schema for YAML/discovery config of a template light: the shared
# MQTT read/write platform schema extended with the template options above,
# plus availability, JSON-attributes and the common light-schema keys.
PLATFORM_SCHEMA_TEMPLATE = (
    mqtt.MQTT_RW_PLATFORM_SCHEMA.extend(
        {
            vol.Optional(CONF_BLUE_TEMPLATE): cv.template,
            vol.Optional(CONF_BRIGHTNESS_TEMPLATE): cv.template,
            vol.Optional(CONF_COLOR_TEMP_TEMPLATE): cv.template,
            vol.Required(CONF_COMMAND_OFF_TEMPLATE): cv.template,
            vol.Required(CONF_COMMAND_ON_TEMPLATE): cv.template,
            vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
            vol.Optional(CONF_EFFECT_LIST): vol.All(cv.ensure_list, [cv.string]),
            vol.Optional(CONF_EFFECT_TEMPLATE): cv.template,
            vol.Optional(CONF_GREEN_TEMPLATE): cv.template,
            vol.Optional(CONF_MAX_MIREDS): cv.positive_int,
            vol.Optional(CONF_MIN_MIREDS): cv.positive_int,
            vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
            vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
            vol.Optional(CONF_RED_TEMPLATE): cv.template,
            vol.Optional(CONF_STATE_TEMPLATE): cv.template,
            vol.Optional(CONF_UNIQUE_ID): cv.string,
            vol.Optional(CONF_WHITE_VALUE_TEMPLATE): cv.template,
        }
    )
    .extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
    .extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
    .extend(MQTT_LIGHT_SCHEMA_SCHEMA.schema)
)
async def async_setup_entity_template(
    config, async_add_entities, config_entry, discovery_data
):
    """Set up a MQTT Template light."""
    entity = MqttTemplate(config, config_entry, discovery_data)
    async_add_entities([entity])
class MqttTemplate(
    MqttAttributes,
    MqttAvailability,
    MqttDiscoveryUpdate,
    MqttEntityDeviceInfo,
    LightEntity,
    RestoreEntity,
):
    """Representation of a MQTT Template light."""
    def __init__(self, config, config_entry, discovery_data):
        """Initialize a MQTT Template light."""
        self._state = False
        self._sub_state = None
        self._topics = None
        self._templates = None
        self._optimistic = False
        # features
        # A non-None value here means the corresponding feature is supported
        # (see supported_features); _setup_from_config decides which apply.
        self._brightness = None
        self._color_temp = None
        self._white_value = None
        self._hs = None
        self._effect = None
        self._unique_id = config.get(CONF_UNIQUE_ID)
        # Load config
        self._setup_from_config(config)
        device_config = config.get(CONF_DEVICE)
        MqttAttributes.__init__(self, config)
        MqttAvailability.__init__(self, config)
        MqttDiscoveryUpdate.__init__(self, discovery_data, self.discovery_update)
        MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
    async def async_added_to_hass(self):
        """Subscribe to MQTT events."""
        await super().async_added_to_hass()
        await self._subscribe_topics()
    async def discovery_update(self, discovery_payload):
        """Handle updated discovery message."""
        # Re-validate the payload and rebuild config, then refresh all
        # mixin state and re-subscribe before pushing the new state to HA.
        config = PLATFORM_SCHEMA_TEMPLATE(discovery_payload)
        self._setup_from_config(config)
        await self.attributes_discovery_update(config)
        await self.availability_discovery_update(config)
        await self.device_info_discovery_update(config)
        await self._subscribe_topics()
        self.async_write_ha_state()
    def _setup_from_config(self, config):
        """(Re)Setup the entity."""
        self._config = config
        self._topics = {
            key: config.get(key) for key in (CONF_STATE_TOPIC, CONF_COMMAND_TOPIC)
        }
        self._templates = {
            key: config.get(key)
            for key in (
                CONF_BLUE_TEMPLATE,
                CONF_BRIGHTNESS_TEMPLATE,
                CONF_COLOR_TEMP_TEMPLATE,
                CONF_COMMAND_OFF_TEMPLATE,
                CONF_COMMAND_ON_TEMPLATE,
                CONF_EFFECT_TEMPLATE,
                CONF_GREEN_TEMPLATE,
                CONF_RED_TEMPLATE,
                CONF_STATE_TEMPLATE,
                CONF_WHITE_VALUE_TEMPLATE,
            )
        }
        optimistic = config[CONF_OPTIMISTIC]
        # Optimistic mode (assume commands succeed) is forced when no state
        # feedback is possible, i.e. no state topic or no state template.
        self._optimistic = (
            optimistic
            or self._topics[CONF_STATE_TOPIC] is None
            or self._templates[CONF_STATE_TEMPLATE] is None
        )
        # features
        # Presence of a template enables the feature; 255 is the initial
        # placeholder value until a real state/restore value arrives.
        if self._templates[CONF_BRIGHTNESS_TEMPLATE] is not None:
            self._brightness = 255
        else:
            self._brightness = None
        if self._templates[CONF_COLOR_TEMP_TEMPLATE] is not None:
            self._color_temp = 255
        else:
            self._color_temp = None
        if self._templates[CONF_WHITE_VALUE_TEMPLATE] is not None:
            self._white_value = 255
        else:
            self._white_value = None
        # Color support requires all three RGB channel templates.
        if (
            self._templates[CONF_RED_TEMPLATE] is not None
            and self._templates[CONF_GREEN_TEMPLATE] is not None
            and self._templates[CONF_BLUE_TEMPLATE] is not None
        ):
            self._hs = [0, 0]
        else:
            self._hs = None
        self._effect = None
    async def _subscribe_topics(self):
        """(Re)Subscribe to topics."""
        for tpl in self._templates.values():
            if tpl is not None:
                tpl.hass = self.hass
        last_state = await self.async_get_last_state()
        @callback
        @log_messages(self.hass, self.entity_id)
        def state_received(msg):
            """Handle new MQTT messages."""
            # Each enabled feature extracts its value from the same payload
            # via its own template; parse failures only log a warning and
            # keep the previous value.
            state = self._templates[
                CONF_STATE_TEMPLATE
            ].async_render_with_possible_json_value(msg.payload)
            if state == STATE_ON:
                self._state = True
            elif state == STATE_OFF:
                self._state = False
            else:
                _LOGGER.warning("Invalid state value received")
            if self._brightness is not None:
                try:
                    self._brightness = int(
                        self._templates[
                            CONF_BRIGHTNESS_TEMPLATE
                        ].async_render_with_possible_json_value(msg.payload)
                    )
                except ValueError:
                    _LOGGER.warning("Invalid brightness value received")
            if self._color_temp is not None:
                try:
                    self._color_temp = int(
                        self._templates[
                            CONF_COLOR_TEMP_TEMPLATE
                        ].async_render_with_possible_json_value(msg.payload)
                    )
                except ValueError:
                    _LOGGER.warning("Invalid color temperature value received")
            if self._hs is not None:
                try:
                    red = int(
                        self._templates[
                            CONF_RED_TEMPLATE
                        ].async_render_with_possible_json_value(msg.payload)
                    )
                    green = int(
                        self._templates[
                            CONF_GREEN_TEMPLATE
                        ].async_render_with_possible_json_value(msg.payload)
                    )
                    blue = int(
                        self._templates[
                            CONF_BLUE_TEMPLATE
                        ].async_render_with_possible_json_value(msg.payload)
                    )
                    self._hs = color_util.color_RGB_to_hs(red, green, blue)
                except ValueError:
                    _LOGGER.warning("Invalid color value received")
            if self._white_value is not None:
                try:
                    self._white_value = int(
                        self._templates[
                            CONF_WHITE_VALUE_TEMPLATE
                        ].async_render_with_possible_json_value(msg.payload)
                    )
                except ValueError:
                    _LOGGER.warning("Invalid white value received")
            if self._templates[CONF_EFFECT_TEMPLATE] is not None:
                effect = self._templates[
                    CONF_EFFECT_TEMPLATE
                ].async_render_with_possible_json_value(msg.payload)
                # NOTE(review): assumes effect_list is configured whenever an
                # effect_template is set -- `in None` would raise; confirm the
                # schema guarantees this pairing.
                if effect in self._config.get(CONF_EFFECT_LIST):
                    self._effect = effect
                else:
                    _LOGGER.warning("Unsupported effect value received")
            self.async_write_ha_state()
        if self._topics[CONF_STATE_TOPIC] is not None:
            self._sub_state = await subscription.async_subscribe_topics(
                self.hass,
                self._sub_state,
                {
                    "state_topic": {
                        "topic": self._topics[CONF_STATE_TOPIC],
                        "msg_callback": state_received,
                        "qos": self._config[CONF_QOS],
                    }
                },
            )
        # In optimistic mode, seed the entity from the state recorded before
        # the last Home Assistant shutdown/restart.
        if self._optimistic and last_state:
            self._state = last_state.state == STATE_ON
            if last_state.attributes.get(ATTR_BRIGHTNESS):
                self._brightness = last_state.attributes.get(ATTR_BRIGHTNESS)
            if last_state.attributes.get(ATTR_HS_COLOR):
                self._hs = last_state.attributes.get(ATTR_HS_COLOR)
            if last_state.attributes.get(ATTR_COLOR_TEMP):
                self._color_temp = last_state.attributes.get(ATTR_COLOR_TEMP)
            if last_state.attributes.get(ATTR_EFFECT):
                self._effect = last_state.attributes.get(ATTR_EFFECT)
            if last_state.attributes.get(ATTR_WHITE_VALUE):
                self._white_value = last_state.attributes.get(ATTR_WHITE_VALUE)
    async def async_will_remove_from_hass(self):
        """Unsubscribe when removed."""
        self._sub_state = await subscription.async_unsubscribe_topics(
            self.hass, self._sub_state
        )
        await MqttAttributes.async_will_remove_from_hass(self)
        await MqttAvailability.async_will_remove_from_hass(self)
        await MqttDiscoveryUpdate.async_will_remove_from_hass(self)
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._brightness
    @property
    def color_temp(self):
        """Return the color temperature in mired."""
        return self._color_temp
    @property
    def min_mireds(self):
        """Return the coldest color_temp that this light supports."""
        return self._config.get(CONF_MIN_MIREDS, super().min_mireds)
    @property
    def max_mireds(self):
        """Return the warmest color_temp that this light supports."""
        return self._config.get(CONF_MAX_MIREDS, super().max_mireds)
    @property
    def hs_color(self):
        """Return the hs color value [int, int]."""
        return self._hs
    @property
    def white_value(self):
        """Return the white property."""
        return self._white_value
    @property
    def should_poll(self):
        """Return True if entity has to be polled for state.
        False if entity pushes its state to HA.
        """
        return False
    @property
    def name(self):
        """Return the name of the entity."""
        return self._config[CONF_NAME]
    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._unique_id
    @property
    def is_on(self):
        """Return True if entity is on."""
        return self._state
    @property
    def assumed_state(self):
        """Return True if unable to access real state of the entity."""
        return self._optimistic
    @property
    def effect_list(self):
        """Return the list of supported effects."""
        return self._config.get(CONF_EFFECT_LIST)
    @property
    def effect(self):
        """Return the current effect."""
        return self._effect
    async def async_turn_on(self, **kwargs):
        """Turn the entity on.
        This method is a coroutine.
        """
        # Collect template variables for the command_on template; in
        # optimistic mode also update local state immediately.
        values = {"state": True}
        if self._optimistic:
            self._state = True
        if ATTR_BRIGHTNESS in kwargs:
            values["brightness"] = int(kwargs[ATTR_BRIGHTNESS])
            if self._optimistic:
                self._brightness = kwargs[ATTR_BRIGHTNESS]
        if ATTR_COLOR_TEMP in kwargs:
            values["color_temp"] = int(kwargs[ATTR_COLOR_TEMP])
            if self._optimistic:
                self._color_temp = kwargs[ATTR_COLOR_TEMP]
        if ATTR_HS_COLOR in kwargs:
            hs_color = kwargs[ATTR_HS_COLOR]
            # If there's a brightness topic set, we don't want to scale the RGB
            # values given using the brightness.
            if self._templates[CONF_BRIGHTNESS_TEMPLATE] is not None:
                brightness = 255
            else:
                brightness = kwargs.get(
                    ATTR_BRIGHTNESS, self._brightness if self._brightness else 255
                )
            rgb = color_util.color_hsv_to_RGB(
                hs_color[0], hs_color[1], brightness / 255 * 100
            )
            values["red"] = rgb[0]
            values["green"] = rgb[1]
            values["blue"] = rgb[2]
            if self._optimistic:
                self._hs = kwargs[ATTR_HS_COLOR]
        if ATTR_WHITE_VALUE in kwargs:
            values["white_value"] = int(kwargs[ATTR_WHITE_VALUE])
            if self._optimistic:
                self._white_value = kwargs[ATTR_WHITE_VALUE]
        if ATTR_EFFECT in kwargs:
            values["effect"] = kwargs.get(ATTR_EFFECT)
            if self._optimistic:
                self._effect = kwargs[ATTR_EFFECT]
        if ATTR_FLASH in kwargs:
            values["flash"] = kwargs.get(ATTR_FLASH)
        if ATTR_TRANSITION in kwargs:
            values["transition"] = int(kwargs[ATTR_TRANSITION])
        mqtt.async_publish(
            self.hass,
            self._topics[CONF_COMMAND_TOPIC],
            self._templates[CONF_COMMAND_ON_TEMPLATE].async_render(**values),
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
        )
        if self._optimistic:
            self.async_write_ha_state()
    async def async_turn_off(self, **kwargs):
        """Turn the entity off.
        This method is a coroutine.
        """
        values = {"state": False}
        if self._optimistic:
            self._state = False
        if ATTR_TRANSITION in kwargs:
            values["transition"] = int(kwargs[ATTR_TRANSITION])
        mqtt.async_publish(
            self.hass,
            self._topics[CONF_COMMAND_TOPIC],
            self._templates[CONF_COMMAND_OFF_TEMPLATE].async_render(**values),
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
        )
        if self._optimistic:
            self.async_write_ha_state()
    @property
    def supported_features(self):
        """Flag supported features."""
        # Feature flags follow the non-None sentinels set up in
        # _setup_from_config (and the configured effect list).
        features = SUPPORT_FLASH | SUPPORT_TRANSITION
        if self._brightness is not None:
            features = features | SUPPORT_BRIGHTNESS
        if self._hs is not None:
            features = features | SUPPORT_COLOR
        if self._config.get(CONF_EFFECT_LIST) is not None:
            features = features | SUPPORT_EFFECT
        if self._color_temp is not None:
            features = features | SUPPORT_COLOR_TEMP
        if self._white_value is not None:
            features = features | SUPPORT_WHITE_VALUE
        return features
| apache-2.0 |
PhilSk/zulip | zerver/management/commands/process_queue.py | 32 | 3101 | from __future__ import absolute_import
from types import FrameType
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from django.core.management import CommandError
from django.conf import settings
from django.utils import autoreload
from zerver.worker.queue_processors import get_worker, get_active_worker_queues
import sys
import signal
import logging
import threading
class Command(BaseCommand):
    """Django management command that runs one queue worker, or all of them."""
    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        parser.add_argument('--queue_name', metavar='<queue name>', type=str,
                            help="queue to process")
        parser.add_argument('--worker_num', metavar='<worker number>', type=int, nargs='?', default=0,
                            help="worker label")
        parser.add_argument('--all', dest="all", action="store_true", default=False,
                            help="run all queues")
    help = "Runs a queue processing worker"
    def handle(self, *args, **options):
        # type: (*Any, **Any) -> None
        logging.basicConfig()
        logger = logging.getLogger('process_queue')
        if not settings.USING_RABBITMQ:
            # Make the warning silent when running the tests
            if settings.TEST_SUITE:
                logger.info("Not using RabbitMQ queue workers in the test suite.")
            else:
                logger.error("Cannot run a queue processor when USING_RABBITMQ is False!")
                sys.exit(1)
        def run_threaded_workers(logger):
            # type: (logging.Logger) -> None
            # Launch one daemonless thread per active queue.
            cnt = 0
            for queue_name in get_active_worker_queues():
                if not settings.DEVELOPMENT:
                    logger.info('launching queue worker thread ' + queue_name)
                cnt += 1
                td = Threaded_worker(queue_name)
                td.start()
            logger.info('%d queue worker threads were launched' % (cnt,))
        if options['all']:
            # NOTE(review): autoreload.main restarts run_threaded_workers on
            # code changes -- presumably only intended for development; confirm.
            autoreload.main(run_threaded_workers, (logger,))
        else:
            # Single-queue mode: run the worker in this process and shut it
            # down cleanly on SIGTERM/SIGINT.
            queue_name = options['queue_name']
            worker_num = options['worker_num']
            logger.info("Worker %d connecting to queue %s" % (worker_num, queue_name))
            worker = get_worker(queue_name)
            worker.setup()
            def signal_handler(signal, frame):
                # type: (int, FrameType) -> None
                logger.info("Worker %d disconnecting from queue %s" % (worker_num, queue_name))
                worker.stop()
                sys.exit(0)
            signal.signal(signal.SIGTERM, signal_handler)
            signal.signal(signal.SIGINT, signal_handler)
            worker.start()
class Threaded_worker(threading.Thread):
    """Thread wrapper that sets up and runs a single queue worker."""

    def __init__(self, queue_name):
        # type: (str) -> None
        super(Threaded_worker, self).__init__()
        self.worker = get_worker(queue_name)

    def run(self):
        # type: () -> None
        self.worker.setup()
        logging.debug('starting consuming ' + self.worker.queue_name)
        self.worker.start()
| apache-2.0 |
milinbhakta/flaskmaterialdesign | venv/Lib/site-packages/pip/_vendor/requests/packages/chardet/mbcsgroupprober.py | 2769 | 1967 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
    """Group prober that tries each multi-byte charset prober in turn."""

    # Same probers, same order, as the original hand-written list.
    _PROBER_CLASSES = (
        UTF8Prober,
        SJISProber,
        EUCJPProber,
        GB2312Prober,
        EUCKRProber,
        CP949Prober,
        Big5Prober,
        EUCTWProber,
    )

    def __init__(self):
        CharSetGroupProber.__init__(self)
        self._mProbers = [cls() for cls in self._PROBER_CLASSES]
        self.reset()
| gpl-2.0 |
jkent/jkent-pybot | pybot/plugins/math/expression.py | 2 | 18249 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Expression parser and compiler."""
import string
import inspect
import collections
import math
import random
# Token classes, as bit flags so parser state checks can mask whole groups.
TOKEN_NUM = 1
TOKEN_VAR = 2
TOKEN_FUNC = 4
TOKEN_OP_RTL = 8
TOKEN_OP_LTR = 16
TOKEN_LPAREN = 32
TOKEN_RPAREN = 64
# Composite masks: what may legally follow an operator vs. a value.
TOKEN_OPERATOR = TOKEN_OP_RTL | TOKEN_OP_LTR | TOKEN_LPAREN
TOKEN_VALUE = TOKEN_NUM | TOKEN_VAR | TOKEN_FUNC | TOKEN_RPAREN
# Right-to-left (unary) operators: name -> (precedence, compiler).  The
# compiler takes compiled operand closure(s) and returns a closure that
# evaluates against an environment dict.
OPS_RTL = {
    '~': (7, lambda a: lambda env: ~a(env)),
}
# Left-to-right (binary) operators; a higher precedence number binds tighter.
# NOTE(review): '**' is registered as left-to-right here although Python's
# exponentiation is right-associative -- confirm this is intentional.
OPS_LTR = {
    '**': (8, lambda a, b: lambda env: a(env) ** b(env)),
    '*': (6, lambda a, b: lambda env: a(env) * b(env)),
    '/': (6, lambda a, b: lambda env: a(env) / b(env)),
    '%': (6, lambda a, b: lambda env: a(env) % b(env)),
    '+': (5, lambda a, b: lambda env: a(env) + b(env)),
    '-': (5, lambda a, b: lambda env: a(env) - b(env)),
    '<<': (4, lambda a, b: lambda env: a(env) << b(env)),
    '>>': (4, lambda a, b: lambda env: a(env) >> b(env)),
    '&': (3, lambda a, b: lambda env: a(env) & b(env)),
    '^': (2, lambda a, b: lambda env: a(env) ^ b(env)),
    '|': (1, lambda a, b: lambda env: a(env) | b(env)),
}
# Combined operator table.
OPS = OPS_RTL.copy()
OPS.update(OPS_LTR)
# Built-in functions available to expressions.  'func' is the callable and
# 'desc' is help text; 'nreq' is presumably the accepted argument count
# (int, or (min, max) tuple) -- verify against the compiler, which is not
# visible here.
BUILTIN_FUNCS = {
    'abs': {'nreq': 1, 'func': abs, 'desc': 'Absolute value'},
    'bin': {'nreq': 1, 'func': bin, 'desc': 'Binary notation'},
    'conjugate': {'func': lambda x: x.conjugate()},
    'dec': {'nreq': 1, 'func': int, 'desc': 'Decimal notation'},
    'imag': {'func': lambda x: x.imag, 'desc': 'Imaginary portion of number'},
    'int': {'nreq': 1, 'func': int, 'desc': 'Cast to integer'},
    'float': {'nreq': 1, 'func': float, 'desc': 'Cast to floating point'},
    'ceil': {'nreq': 1, 'func': math.ceil, 'desc': 'Round up'},
    'cos': {'nreq': 1, 'func': math.cos, 'desc': 'Trig cosine function'},
    'factorial': {'nreq': 1, 'func': math.factorial},
    'floor': {'nreq': 1, 'func': math.floor, 'desc': 'Round down'},
    'hex': {'nreq': 1, 'func': hex, 'desc': 'Hex notation'},
    'log': {'nreq': (1, 2), 'func': math.log, 'desc': 'Logarithm, second argument is base'},
    'log10': {'nreq': 1, 'func': math.log10, 'desc': 'Base 10 logarithm'},
    'oct': {'nreq': 1, 'func': oct, 'desc': 'Octal notation'},
    'round': {'nreq': (1, 2), 'func': round, 'desc': 'Normal round, second argument is number of decimal places'},
    'sin': {'nreq': 1, 'func': math.sin, 'desc': 'Trig sine function'},
    'sqrt': {'nreq': 1, 'func': math.sqrt, 'desc': 'Square root function'},
    'tan': {'nreq': 1, 'func': math.tan, 'desc': 'Trig tangent function'},
    'rand': {'nreq': 0, 'func': random.random, 'desc': 'Random number between 0 and 1'},
    'randint': {'func': random.randint, 'desc': 'Random integer between first and second arguments, inclusive'},
    'real': {'func': lambda x: x.real, 'desc': 'Real portion of number'},
}
# Built-in read-only variables.
BUILTIN_VARS = {
    'pi': math.pi,
    'e': math.e,
}
class ExpressionError(Exception):
    """Base class for all expression exceptions.

    Carries ``pos``, the character offset into the source expression at
    which the problem was detected.
    """

    def __init__(self, pos, message):
        Exception.__init__(self, message)
        self.pos = pos
class ParseError(ExpressionError):
    """Exception raised when tokenizing/parsing an expression fails."""

    def __init__(self, pos, message):
        ExpressionError.__init__(self, pos, message)
class CompileError(ExpressionError):
    """Exception raised during expression compilation."""

    def __init__(self, pos, message):
        ExpressionError.__init__(self, pos, message)
class ComputeError(ExpressionError):
    """Exception raised while evaluating a compiled expression."""

    def __init__(self, pos, message):
        ExpressionError.__init__(self, pos, message)
class DeclarationError(Exception):
    """Exception raised when a func declaration is invalid.

    Unlike the ExpressionError family this carries no source position.
    """

    def __init__(self, message):
        Exception.__init__(self, message)
def issymbol(s):
    """Validates a string as a symbol (identifier-like name)."""
    if not s:
        return False
    lead_chars = string.ascii_letters + '_'
    body_chars = string.digits + string.ascii_letters + '_'
    return s[0] in lead_chars and all(c in body_chars for c in s[1:])
def parse_expr(expr, offset=0):
    """Expression parser that returns a tuple of tokens.

    Each token is a ``(type, value, position)`` triple.  ``offset`` is added
    to every recorded position so that errors inside nested sub-expressions
    (func arguments, parsed recursively) point into the top-level string.
    """
    tokens = []
    pos = 0
    paren = 0          # open-parenthesis depth at the top level
    token_start = None # start index of the value token currently being scanned
    last = 0           # type of the last emitted token (0 before any token)
    def add_value():
        """Adds a value type (except for func) to the tokens list."""
        token = expr[token_start:pos]
        if not token:
            return last
        if issymbol(token):
            type_ = TOKEN_VAR
        else:
            try:
                if 'j' in token:
                    token = complex(token)
                elif '.' in token or 'e' in token:
                    token = float(token)
                elif 'x' in token or 'b' in token or \
                        (len(token) > 1 and token[0] == '0'):
                    # Hex/binary/octal literals are only validated here; the
                    # string form is kept and coerced to int at compute time
                    # (see coherse).
                    int(token, 0)
                else:
                    token = int(token)
                type_ = TOKEN_NUM
            except ValueError:
                raise ParseError(offset + token_start, 'invalid token')
        # Two adjacent values (anything but an operator before us) is an error.
        if last & ~TOKEN_OPERATOR:
            raise ParseError(offset + token_start, 'operator expected')
        tokens.append((type_, token, offset + token_start))
        return type_
    def add_func(pos):
        """Adds a func type to the tokens list."""
        if last & ~TOKEN_OPERATOR:
            raise ParseError(offset + token_start, 'operator expected')
        name = expr[token_start:pos]
        if not issymbol(name):
            raise ParseError(offset + token_start, 'invalid func name')
        args = []
        pos += 1
        start = pos
        paren = 1  # depth of the func's own argument parentheses
        while pos < len(expr):
            if expr[pos] == '(':
                paren += 1
            elif expr[pos] == ',':
                # Argument separator only at depth 1; recurse on the slice.
                if paren == 1:
                    subexpr = expr[start:pos]
                    args.append(parse_expr(subexpr, offset + start))
                    pos += 1
                    # Skip spaces after the comma before the next argument.
                    while pos < len(expr):
                        if expr[pos] != ' ':
                            break
                        pos += 1
                    start = pos
                    continue
            elif expr[pos] == ')':
                paren -= 1
                if paren == 0:
                    subexpr = expr[start:pos]
                    # An empty final slice only counts when preceded by args.
                    if args or subexpr.strip():
                        args.append(parse_expr(subexpr, offset + start))
                    pos += 1
                    break
            pos += 1
        if paren > 0:
            raise ParseError(offset + pos, 'closing parenthesis expected')
        tokens.append((TOKEN_FUNC, (name,) + tuple(args), offset + token_start))
        return pos
    while pos < len(expr):
        if expr[pos].isspace():
            # Whitespace terminates the token currently being scanned.
            if token_start != None:
                last = add_value()
                token_start = None
            pos += 1
        elif expr[pos] == '-' and token_start == None and \
                expr[pos+1:pos+2] in string.digits and not last & TOKEN_VALUE:
            # Unary minus starting a negative numeric literal.
            token_start = pos
            pos += 1
        elif expr[pos] in '+-' and token_start != None and \
                expr[pos-1:pos] == 'e' and expr[pos-2:pos-1] in string.digits + '.':
            # Sign inside a float exponent (e.g. 1.5e-3); part of the number.
            pos += 1
        elif expr[pos:pos+2] in OPS_RTL:
            # Two-character right-associative operator.
            if token_start != None:
                last = add_value()
                token_start = None
            if last & ~(TOKEN_OP_LTR | TOKEN_LPAREN):
                raise ParseError(offset + pos, 'operator expected')
            tokens.append((TOKEN_OP_RTL, expr[pos:pos+2], offset + pos))
            last = TOKEN_OP_RTL
            pos += 2
        elif expr[pos:pos+2] in OPS_LTR:
            # Two-character left-associative operator.
            if token_start != None:
                last = add_value()
                token_start = None
            if not last & TOKEN_VALUE:
                # NOTE(review): the numbered/'!'-suffixed messages below look
                # like leftover debug markers.
                raise ParseError(offset + pos, 'value expected1')
            tokens.append((TOKEN_OP_LTR, expr[pos:pos+2], offset + pos))
            last = TOKEN_OP_LTR
            pos += 2
        elif expr[pos] in OPS_RTL:
            # Single-character right-associative operator.
            if token_start != None:
                last = add_value()
                token_start = None
            if last & ~(TOKEN_OP_LTR | TOKEN_LPAREN):
                raise ParseError(offset + pos, 'operator expected')
            tokens.append((TOKEN_OP_RTL, expr[pos], offset + pos))
            last = TOKEN_OP_RTL
            pos += 1
        elif expr[pos] in OPS_LTR:
            # Single-character left-associative operator.
            if token_start != None:
                last = add_value()
                token_start = None
            if not last & TOKEN_VALUE:
                raise ParseError(offset + pos, 'value expected2')
            tokens.append((TOKEN_OP_LTR, expr[pos], offset + pos))
            last = TOKEN_OP_LTR
            pos += 1
        elif expr[pos] == '(':
            if token_start == None:
                # Grouping parenthesis.
                paren += 1
                if last & ~TOKEN_OPERATOR:
                    raise ParseError(offset + pos, 'operator expected')
                tokens.append((TOKEN_LPAREN, '(', offset + pos))
                last = TOKEN_LPAREN
                pos += 1
            else:
                # '(' directly after a symbol -> func call.
                pos = add_func(pos)
                token_start = None
                last = TOKEN_FUNC
        elif expr[pos] == ')':
            if token_start != None:
                last = add_value()
                token_start = None
            paren -= 1
            if paren < 0:
                raise ParseError(tokens[-1][2], 'unexpected parenthesis')
            if last & TOKEN_OPERATOR:
                raise ParseError(offset + pos, 'value expected')
            tokens.append((TOKEN_RPAREN, ')', offset + pos))
            last = TOKEN_RPAREN
            pos += 1
        else:
            # Any other character must be part of a number or symbol token.
            if expr[pos] not in string.digits + string.ascii_letters + '._':
                raise ParseError(offset + pos, 'invalid token')
            if token_start == None:
                token_start = pos
            pos += 1
    # Flush a trailing value token, then validate the overall shape.
    if token_start != None:
        last = add_value()
    if not tokens or (last & TOKEN_OPERATOR):
        raise ParseError(offset + pos - 1, 'value expected!')
    if paren > 0:
        raise ParseError(offset + pos, 'closing parenthesis expected')
    return tuple(tokens)
def coherse(arg):
    """Wraps a value with a function that converts strings to integers."""
    def converted(env):
        """Resolve *arg* against *env*, coercing a string result to int
        with base auto-detection (handles 0x.../0b.../0o... literals)."""
        result = arg(env)
        if isinstance(result, str):
            return int(result, 0)
        return result
    return converted
def call_func(name, pos, values, env):
    """Calls a func type at expression compute time.

    *values* holds the compiled (callable) argument expressions; *env*
    carries 'funcs', 'globals'/'locals', the recursion 'stack' and an
    optional 'exc_handler'.  Raises ComputeError on any failure.
    """
    func_dict = env.get('funcs',{}).get(name)
    if func_dict == None:
        raise ComputeError(pos, 'undefined func: ' + name)
    func = func_dict['func']
    if func_dict.get('expr', None):
        # User-defined func (declared from an expression via define_func).
        names = func_dict.get('args', ())
        nreq, narg = len(names), len(values)
        if narg != nreq:
            raise ComputeError(pos, 'func %s expects %d argument(s), got %d' % \
                (name, nreq, narg))
        args = []
        for value in values:
            args.append(value(env))
        # Track the active call chain to detect (mutual) recursion.
        stack = env.setdefault('stack', [])
        if name in stack:
            raise ComputeError(pos, 'recursion detected while calling func: ' + \
                name)
        stack.append(name)
        # The declared argument names become the callee's locals.
        new_env = env.copy()
        new_env['locals'] = dict(list(zip(names, args)))
        try:
            value = func(new_env)
        except ExpressionError as exc:
            stack.pop()
            exc_handler = env.get('exc_handler')
            if exc_handler:
                exc_handler(name, args, exc, env, func_dict.get('expr'))
            raise ComputeError(pos, 'exception in func: ' + name)
        # NOTE(review): if func raises a non-ExpressionError, the name is
        # never popped from 'stack' — confirm whether that is intended.
        stack.pop()
    else:
        # Built-in func: a plain Python callable called with computed args.
        narg, nreq = len(values), func_dict.get('nreq')
        if isinstance(nreq, tuple):
            # (min, max) range of accepted argument counts.
            nreq_min, nreq_max = nreq
            if narg < nreq_min or narg > nreq_max:
                raise ComputeError(pos, 'func %s expects %d to %d arguments, ' \
                    'got %d' % (name, nreq_min, nreq_max, narg))
        else:
            if nreq == None:
                # C builtins and classes expose no argument count, so nreq
                # must be declared explicitly for them.
                if inspect.isbuiltin(func) or isinstance(func, type):
                    raise ComputeError(pos, 'func %s must have nreq ' \
                        'or nreq_min/nreq_max defined' % name)
                nreq = func.__code__.co_argcount
                if inspect.ismethod(func):
                    nreq -= 1
            if narg != nreq:
                raise ComputeError(pos, 'func %s expects %d argument(s), got ' \
                    '%d' % (name, nreq, narg))
        args = []
        for value in values:
            # coherse converts hex/binary string literals to int before the call.
            args.append(coherse(value)(env))
        try:
            value = func(*args)
        except ExpressionError as exc:
            exc_handler = env.get('exc_handler')
            if exc_handler:
                exc_handler(name, args, exc, env, func_dict.get('expr'))
            raise ComputeError(pos, 'exception in func: ' + name)
    return value
def compile_expr(tokens):
    """Compiles an expression in parsed/token form to a tree of lambdas.

    Implements the shunting-yard algorithm: *output* collects compiled
    operand callables, *stack* holds pending operator/parenthesis tokens.
    Returns a single callable taking the compute-time environment.
    """
    output = []
    stack = []
    def operator(token):
        """Emits an operator lambda."""
        _, value, _ = token
        func = OPS[value][1]
        args = []
        # Pop as many operands as the operator implementation declares.
        for _ in range(func.__code__.co_argcount):
            args.insert(0, coherse(output.pop()))
        return func(*args)
    def operand(token):
        """Emits an operand lambda."""
        type_, value, pos = token
        if type_ == TOKEN_NUM:
            return lambda env: value
        elif type_ == TOKEN_VAR:
            name = value
            def get_var(env):
                """Compute-time function to resolve a var."""
                # Locals (func arguments) shadow globals.
                value = env.get('locals', {}).get(name)
                if value == None:
                    value = env.get('globals', {}).get(name)
                if value == None:
                    raise ComputeError(pos, 'undefined var: ' + name)
                return value
            return get_var
        elif type_ == TOKEN_FUNC:
            # value is (name, arg_tokens, ...): compile each argument expression.
            name, token_sets = value[0], value[1:]
            compiled_args = []
            for tokens in token_sets:
                compiled_args.append(compile_expr(tokens))
            return lambda env: call_func(name, pos, compiled_args, env)
        else:
            raise CompileError(pos, 'unsupported token')
    for token in tokens:
        type_, value, _ = token
        if type_ == TOKEN_OP_RTL:
            # Right-associative: only pop strictly higher-precedence operators.
            while stack and stack[-1][0] != TOKEN_LPAREN:
                if OPS[stack[-1][1]][0] <= OPS_RTL[value][0]:
                    break
                output.append(operator(stack.pop()))
            stack.append(token)
        elif type_ == TOKEN_OP_LTR:
            # Left-associative: pop operators of equal or higher precedence.
            while stack and stack[-1][0] != TOKEN_LPAREN:
                if OPS[stack[-1][1]][0] < OPS_LTR[value][0]:
                    break
                output.append(operator(stack.pop()))
            stack.append(token)
        elif type_ == TOKEN_LPAREN:
            stack.append(token)
        elif type_ == TOKEN_RPAREN:
            # Reduce until the matching '(' and discard it.
            while stack and stack[-1][0] != TOKEN_LPAREN:
                output.append(operator(stack.pop()))
            stack.pop()
        else:
            output.append(operand(token))
    # Reduce whatever operators remain.
    while stack:
        output.append(operator(stack.pop()))
    if stack or len(output) != 1:
        raise CompileError(0, 'invalid token input')
    return output[0]
def define_func(env, name, args, expr, desc=None):
    """Compiles an expression and saves it as a function in the environment.

    *args* is a comma-separated string of argument names; *desc* is an
    optional human-readable description.
    """
    if not issymbol(name):
        raise DeclarationError('name is not a valid symbol: ' + name)
    funcs = env.setdefault('funcs', {})
    arg_spec = args.strip()
    if arg_spec:
        arg_names = tuple(part.strip() for part in arg_spec.split(','))
        for arg, count in collections.Counter(arg_names).items():
            if not issymbol(arg):
                raise DeclarationError('arg is not a valid symbol: ' + arg)
            if count > 1:
                raise DeclarationError('arg is specified multiple times: ' + arg)
    else:
        arg_names = ()
    compiled = compile_expr(parse_expr(expr))
    entry = {'expr': expr, 'args': arg_names, 'func': compiled}
    if desc is not None:
        entry['desc'] = desc
    funcs[name] = entry
def undefine_func(env, name):
    """Undefines a func, silently ignoring names that are not defined."""
    if not issymbol(name):
        raise DeclarationError('name is not a valid symbol: ' + name)
    funcs = env.setdefault('funcs', {})
    # pop with a default avoids a separate membership test.
    funcs.pop(name, None)
def define_var(env, name, expr):
    """Compiles and computes an expression, storing the value in the environment."""
    if not issymbol(name):
        raise DeclarationError('name is not a valid symbol: ' + name)
    globals_ = env.setdefault('globals', {})
    tokens = parse_expr(expr)
    compiled = compile_expr(tokens)
    # The value is computed eagerly; later environment changes do not
    # re-evaluate the expression.
    globals_[name] = compiled(env)
def undefine_var(env, name):
    """Undefines a global var, silently ignoring names that are not defined."""
    if not issymbol(name):
        raise DeclarationError('name is not a valid symbol: ' + name)
    env.setdefault('globals', {}).pop(name, None)
def expr_exc_handler(name, args, exc, env, expr):
    """Sample expr exception handler.

    Prints the error message and, when the failing expression text is
    available, echoes it with a caret marking the failure position.
    """
    # BaseException has no .message attribute in Python 3; use str(exc).
    print('Error: ' + str(exc))
    if expr:
        print('  ' + expr)
        # +2 accounts for the two-space indent printed above.
        print(' ' * (exc.pos + 2) + '^')
if __name__ == '__main__':
    import sys
    # Build a demo environment from the builtin vars/funcs plus a few
    # user-defined funcs, then evaluate sys.argv[1] for x = 0..4.
    env = {}
    env['exc_handler'] = expr_exc_handler
    env['globals'] = BUILTIN_VARS.copy()
    env['funcs'] = BUILTIN_FUNCS.copy()
    # define_func takes the name and the comma-separated argument list as
    # separate parameters; 'f0(x)' style names are not valid symbols.
    define_func(env, 'f0', 'x', 'x+1')
    define_func(env, 'f1', '', 'x+1')
    define_func(env, 'f2', 'x', 'f1()+x')
    define_func(env, 'area_of_circle', 'r', '2*pi*r**2')
    define_func(env, 'recurse', '', 'recurse()')
    expr = sys.argv[1]
    try:
        tokens = parse_expr(expr)
        compiled = compile_expr(tokens)
        for i in range(5):
            env['globals']['x'] = i
            result = compiled(env)
            print('for x = %d:' % i, expr, '=', result)
    except ExpressionError as exc:
        expr_exc_handler('', [], exc, env, expr)
        sys.exit(1)
| mit |
jaeilepp/mne-python | mne/io/matrix.py | 8 | 4666 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from .constants import FIFF
from .tag import find_tag, has_tag
from .write import (write_int, start_block, end_block, write_float_matrix,
write_name_list)
from ..utils import logger, verbose
def _transpose_named_matrix(mat):
"""Transpose mat inplace (no copy)."""
mat['nrow'], mat['ncol'] = mat['ncol'], mat['nrow']
mat['row_names'], mat['col_names'] = mat['col_names'], mat['row_names']
mat['data'] = mat['data'].T
@verbose
def _read_named_matrix(fid, node, matkind, indent='    ', transpose=False,
                       verbose=None):
    """Read named matrix from the given node.

    Parameters
    ----------
    fid : file
        The opened file descriptor.
    node : dict
        The node in the tree.
    matkind : int
        The type of matrix.
    indent : str
        Prefix prepended to log messages.
    transpose : bool
        If True, transpose the matrix. Default is False.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    mat: dict
        The matrix data
    """
    # Descend one level if necessary
    if node['block'] != FIFF.FIFFB_MNE_NAMED_MATRIX:
        # Look for a named-matrix child that actually holds matkind data.
        for k in range(node['nchild']):
            if node['children'][k]['block'] == FIFF.FIFFB_MNE_NAMED_MATRIX:
                if has_tag(node['children'][k], matkind):
                    node = node['children'][k]
                    break
        else:
            logger.info(indent + 'Desired named matrix (kind = %d) not '
                        'available' % matkind)
            return None
    else:
        if not has_tag(node, matkind):
            logger.info(indent + 'Desired named matrix (kind = %d) not '
                        'available' % matkind)
            return None
    # Read everything we need
    tag = find_tag(fid, node, matkind)
    if tag is None:
        raise ValueError('Matrix data missing')
    else:
        data = tag.data
        nrow, ncol = data.shape
        # Cross-check the optional dimension tags against the data shape.
        tag = find_tag(fid, node, FIFF.FIFF_MNE_NROW)
        if tag is not None and tag.data != nrow:
            raise ValueError('Number of rows in matrix data and FIFF_MNE_NROW '
                             'tag do not match')
        tag = find_tag(fid, node, FIFF.FIFF_MNE_NCOL)
        if tag is not None and tag.data != ncol:
            raise ValueError('Number of columns in matrix data and '
                             'FIFF_MNE_NCOL tag do not match')
    # Row/column names are stored as colon-separated strings.
    tag = find_tag(fid, node, FIFF.FIFF_MNE_ROW_NAMES)
    row_names = tag.data.split(':') if tag is not None else []
    tag = find_tag(fid, node, FIFF.FIFF_MNE_COL_NAMES)
    col_names = tag.data.split(':') if tag is not None else []
    mat = dict(nrow=nrow, ncol=ncol, row_names=row_names, col_names=col_names,
               data=data)
    if transpose:
        _transpose_named_matrix(mat)
    return mat
def write_named_matrix(fid, kind, mat):
    """Write named matrix to the given file.

    Parameters
    ----------
    fid : file
        The opened file descriptor.
    kind : int
        The tag kind to use when writing the data matrix.
    mat : dict
        The named matrix to write, with keys nrow, ncol, row_names,
        col_names and data.
    """
    # let's save ourselves from disaster: the declared dimensions must
    # match the actual data size.
    n_tot = mat['nrow'] * mat['ncol']
    if mat['data'].size != n_tot:
        ratio = n_tot / float(mat['data'].size)
        if n_tot < mat['data'].size and ratio > 0:
            ratio = 1 / ratio
        raise ValueError('Cannot write matrix: row (%i) and column (%i) '
                         'total element (%i) mismatch with data size (%i), '
                         'appears to be off by a factor of %gx'
                         % (mat['nrow'], mat['ncol'], n_tot,
                            mat['data'].size, ratio))
    start_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX)
    write_int(fid, FIFF.FIFF_MNE_NROW, mat['nrow'])
    write_int(fid, FIFF.FIFF_MNE_NCOL, mat['ncol'])
    if len(mat['row_names']) > 0:
        # let's prevent unintentional stupidity
        if len(mat['row_names']) != mat['nrow']:
            raise ValueError('len(mat["row_names"]) != mat["nrow"]')
        write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, mat['row_names'])
    if len(mat['col_names']) > 0:
        # let's prevent unintentional stupidity
        if len(mat['col_names']) != mat['ncol']:
            raise ValueError('len(mat["col_names"]) != mat["ncol"]')
        write_name_list(fid, FIFF.FIFF_MNE_COL_NAMES, mat['col_names'])
    write_float_matrix(fid, kind, mat['data'])
    end_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX)
| bsd-3-clause |
laiy/Database_Project | third_party/nltk/tokenize/__init__.py | 7 | 4380 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Tokenizers
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com> (minor additions)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
r"""
NLTK Tokenizer Package
Tokenizers divide strings into lists of substrings. For example,
tokenizers can be used to find the words and punctuation in a string:
>>> from nltk.tokenize import word_tokenize
>>> s = '''Good muffins cost $3.88\nin New York. Please buy me
... two of them.\n\nThanks.'''
>>> word_tokenize(s)
['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.',
'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
This particular tokenizer requires the Punkt sentence tokenization
models to be installed. NLTK also provides a simpler,
regular-expression based tokenizer, which splits text on whitespace
and punctuation:
>>> from nltk.tokenize import wordpunct_tokenize
>>> wordpunct_tokenize(s)
['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', '.',
'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
We can also operate at the level of sentences, using the sentence
tokenizer directly as follows:
>>> from nltk.tokenize import sent_tokenize, word_tokenize
>>> sent_tokenize(s)
['Good muffins cost $3.88\nin New York.', 'Please buy me\ntwo of them.', 'Thanks.']
>>> [word_tokenize(t) for t in sent_tokenize(s)]
[['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.'],
['Please', 'buy', 'me', 'two', 'of', 'them', '.'], ['Thanks', '.']]
Caution: when tokenizing a Unicode string, make sure you are not
using an encoded version of the string (it may be necessary to
decode it first, e.g. with ``s.decode("utf8")``).
NLTK tokenizers can produce token-spans, represented as tuples of integers
having the same semantics as string slices, to support efficient comparison
of tokenizers. (These methods are implemented as generators.)
>>> from nltk.tokenize import WhitespaceTokenizer
>>> list(WhitespaceTokenizer().span_tokenize(s))
[(0, 4), (5, 12), (13, 17), (18, 23), (24, 26), (27, 30), (31, 36), (38, 44),
(45, 48), (49, 51), (52, 55), (56, 58), (59, 64), (66, 73)]
There are numerous ways to tokenize text. If you need more control over
tokenization, see the other methods provided in this package.
For further information, please see Chapter 3 of the NLTK book.
"""
from nltk.data import load
from nltk.tokenize.simple import (SpaceTokenizer, TabTokenizer, LineTokenizer,
line_tokenize)
from nltk.tokenize.regexp import (RegexpTokenizer, WhitespaceTokenizer,
BlanklineTokenizer, WordPunctTokenizer,
wordpunct_tokenize, regexp_tokenize,
blankline_tokenize)
from nltk.tokenize.punkt import PunktSentenceTokenizer
from nltk.tokenize.sexpr import SExprTokenizer, sexpr_tokenize
from nltk.tokenize.treebank import TreebankWordTokenizer
from nltk.tokenize.texttiling import TextTilingTokenizer
# Standard sentence tokenizer.
def sent_tokenize(text, language='english'):
    """
    Split *text* into sentences and return them as a list, using NLTK's
    recommended sentence tokenizer (currently
    :class:`.PunktSentenceTokenizer` trained for the given language).

    :param text: text to split into sentences
    :param language: the model name in the Punkt corpus
    """
    model_path = 'tokenizers/punkt/{0}.pickle'.format(language)
    return load(model_path).tokenize(text)
# Standard word tokenizer.
# Bound once at import time so word_tokenize does not rebuild the
# tokenizer on every call.
_treebank_word_tokenize = TreebankWordTokenizer().tokenize
def word_tokenize(text, language='english'):
    """
    Split *text* into word tokens and return them as a list, using NLTK's
    recommended word tokenizer (currently :class:`.TreebankWordTokenizer`
    applied to each sentence found by :class:`.PunktSentenceTokenizer`
    for the given language).

    :param text: text to split into sentences
    :param language: the model name in the Punkt corpus
    """
    words = []
    for sentence in sent_tokenize(text, language):
        words.extend(_treebank_word_tokenize(sentence))
    return words
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| apache-2.0 |
xin3liang/platform_external_chromium_org | tools/cr/cr/base/arch.py | 113 | 1544 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the basic architectures supported by cr."""
import cr
# Default configuration: forwards the selected architecture into the
# environment-setup variable.
DEFAULT = cr.Config.From(
    CR_ENVSETUP_ARCH='{CR_ARCH}',
)
class Arch(cr.Plugin, cr.Plugin.Type):
  """Base class for implementing cr architecture targets."""

  # Config variable that selects the active architecture plugin.
  SELECTOR = 'CR_ARCH'

  @classmethod
  def AddArguments(cls, parser):
    """Registers the --architecture command-line argument on *parser*."""
    parser.add_argument(
        '--architecture', dest=cls.SELECTOR,
        choices=cls.Choices(),
        default=None,
        help='Sets the target architecture to use. Overrides ' + cls.SELECTOR
    )
class IA32Arch(Arch):
  """32-bit Intel x86 architecture target."""

  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='ia32',
  )
class Mips32Arch(Arch):
  """32-bit little-endian MIPS architecture target."""

  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='mipsel',
  )

  @property
  def enabled(self):
    # Only selectable when the Android platform is active.
    return cr.AndroidPlatform.GetInstance().is_active
class X64Arch(Arch):
  """64-bit x86 architecture target."""

  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='x64',
  )

  @property
  def priority(self):
    # Preferred over the base architectures when choosing a default.
    return super(X64Arch, self).priority + 1
class Arm32Arch(Arch):
  """32-bit ARM architecture target."""

  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='arm',
  )

  @property
  def priority(self):
    # Highest default priority of the architectures defined here.
    return super(Arm32Arch, self).priority + 2

  @property
  def enabled(self):
    # Only selectable when the Android platform is active.
    return cr.AndroidPlatform.GetInstance().is_active
class Arm64Arch(Arch):
  """64-bit ARM architecture target."""

  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='arm64',
  )

  @property
  def enabled(self):
    # Only selectable when the Android platform is active.
    return cr.AndroidPlatform.GetInstance().is_active
| bsd-3-clause |
t3dev/odoo | addons/point_of_sale/models/pos_session.py | 1 | 17460 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import timedelta
from odoo import api, fields, models, SUPERUSER_ID, _
from odoo.exceptions import UserError, ValidationError
class PosSession(models.Model):
_name = 'pos.session'
_order = 'id desc'
_description = 'Point of Sale Session'
_inherit = ['mail.thread', 'mail.activity.mixin']
POS_SESSION_STATE = [
('opening_control', 'Opening Control'), # method action_pos_session_open
('opened', 'In Progress'), # method action_pos_session_closing_control
('closing_control', 'Closing Control'), # method action_pos_session_close
('closed', 'Closed & Posted'),
]
def _confirm_orders(self):
for session in self:
journal = session.config_id.journal_id
if not journal:
raise UserError(_("You have to set a Sale Journal for the POS:%s") % (session.config_id.name,))
orders = session.order_ids.filtered(lambda order: order.state == 'paid')
orders.with_context(force_company=journal.company_id.id)._create_account_move_line(session)
for order in session.order_ids.filtered(lambda o: o.state not in ['done', 'invoiced']):
if order.state not in ('paid'):
raise UserError(
_("You cannot confirm all orders of this session, because they have not the 'paid' status.\n"
"{reference} is in state {state}, total amount: {total}, paid: {paid}").format(
reference=order.pos_reference or order.name,
state=order.state,
total=order.amount_total,
paid=order.amount_paid,
))
order.action_pos_order_done()
orders_to_reconcile = session.order_ids._filtered_for_reconciliation()
orders_to_reconcile.sudo()._reconcile_payments()
config_id = fields.Many2one(
'pos.config', string='Point of Sale',
help="The physical point of sale you will use.",
required=True,
index=True)
name = fields.Char(string='Session ID', required=True, readonly=True, default='/')
user_id = fields.Many2one(
'res.users', string='Responsible',
required=True,
index=True,
readonly=True,
states={'opening_control': [('readonly', False)]},
default=lambda self: self.env.uid)
currency_id = fields.Many2one('res.currency', related='config_id.currency_id', string="Currency", readonly=False)
start_at = fields.Datetime(string='Opening Date', readonly=True)
stop_at = fields.Datetime(string='Closing Date', readonly=True, copy=False)
state = fields.Selection(
POS_SESSION_STATE, string='Status',
required=True, readonly=True,
index=True, copy=False, default='opening_control')
sequence_number = fields.Integer(string='Order Sequence Number', help='A sequence number that is incremented with each order', default=1)
login_number = fields.Integer(string='Login Sequence Number', help='A sequence number that is incremented each time a user resumes the pos session', default=0)
cash_control = fields.Boolean(compute='_compute_cash_all', string='Has Cash Control')
cash_journal_id = fields.Many2one('account.journal', compute='_compute_cash_all', string='Cash Journal', store=True)
cash_register_id = fields.Many2one('account.bank.statement', compute='_compute_cash_all', string='Cash Register', store=True)
cash_register_balance_end_real = fields.Monetary(
related='cash_register_id.balance_end_real',
string="Ending Balance",
help="Total of closing cash control lines.",
readonly=True)
cash_register_balance_start = fields.Monetary(
related='cash_register_id.balance_start',
string="Starting Balance",
help="Total of opening cash control lines.",
readonly=True)
cash_register_total_entry_encoding = fields.Monetary(
related='cash_register_id.total_entry_encoding',
string='Total Cash Transaction',
readonly=True,
help="Total of all paid sales orders")
cash_register_balance_end = fields.Monetary(
related='cash_register_id.balance_end',
digits=0,
string="Theoretical Closing Balance",
help="Sum of opening balance and transactions.",
readonly=True)
cash_register_difference = fields.Monetary(
related='cash_register_id.difference',
string='Difference',
help="Difference between the theoretical closing balance and the real closing balance.",
readonly=True)
journal_ids = fields.Many2many(
'account.journal',
related='config_id.journal_ids',
readonly=True,
string='Available Payment Methods')
order_ids = fields.One2many('pos.order', 'session_id', string='Orders')
statement_ids = fields.One2many('account.bank.statement', 'pos_session_id', string='Bank Statement', readonly=True)
picking_count = fields.Integer(compute='_compute_picking_count')
rescue = fields.Boolean(string='Recovery Session',
help="Auto-generated session for orphan orders, ignored in constraints",
readonly=True,
copy=False)
_sql_constraints = [('uniq_name', 'unique(name)', "The name of this POS Session must be unique !")]
@api.multi
def _compute_picking_count(self):
for pos in self:
pickings = pos.order_ids.mapped('picking_id').filtered(lambda x: x.state != 'done')
pos.picking_count = len(pickings.ids)
@api.multi
def action_stock_picking(self):
pickings = self.order_ids.mapped('picking_id').filtered(lambda x: x.state != 'done')
action_picking = self.env.ref('stock.action_picking_tree_ready')
action = action_picking.read()[0]
action['context'] = {}
action['domain'] = [('id', 'in', pickings.ids)]
return action
@api.depends('config_id', 'statement_ids')
def _compute_cash_all(self):
for session in self:
session.cash_journal_id = session.cash_register_id = session.cash_control = False
if session.config_id.cash_control:
for statement in session.statement_ids:
if statement.journal_id.type == 'cash':
session.cash_control = True
session.cash_journal_id = statement.journal_id.id
session.cash_register_id = statement.id
if not session.cash_control and session.state != 'closed':
raise UserError(_("Cash control can only be applied to cash journals."))
@api.constrains('user_id', 'state')
def _check_unicity(self):
# open if there is no session in 'opening_control', 'opened', 'closing_control' for one user
if self.search_count([
('state', 'not in', ('closed', 'closing_control')),
('user_id', '=', self.user_id.id),
('rescue', '=', False)
]) > 1:
raise ValidationError(_("You cannot create two active sessions with the same responsible."))
@api.constrains('config_id')
def _check_pos_config(self):
if self.search_count([
('state', '!=', 'closed'),
('config_id', '=', self.config_id.id),
('rescue', '=', False)
]) > 1:
raise ValidationError(_("Another session is already opened for this point of sale."))
@api.constrains('start_at')
def _check_start_date(self):
for record in self:
company = record.config_id.journal_id.company_id
start_date = record.start_at.date()
if (company.period_lock_date and start_date <= company.period_lock_date) or (company.fiscalyear_lock_date and start_date <= company.fiscalyear_lock_date):
raise ValidationError(_("You cannot create a session before the accounting lock date."))
@api.model
def create(self, values):
config_id = values.get('config_id') or self.env.context.get('default_config_id')
if not config_id:
raise UserError(_("You should assign a Point of Sale to your session."))
# journal_id is not required on the pos_config because it does not
# exists at the installation. If nothing is configured at the
# installation we do the minimal configuration. Impossible to do in
# the .xml files as the CoA is not yet installed.
pos_config = self.env['pos.config'].browse(config_id)
ctx = dict(self.env.context, company_id=pos_config.company_id.id)
if not pos_config.journal_id:
default_journals = pos_config.with_context(ctx).default_get(['journal_id', 'invoice_journal_id'])
if (not default_journals.get('journal_id') or
not default_journals.get('invoice_journal_id')):
raise UserError(_("Unable to open the session. You have to assign a sales journal to your point of sale."))
pos_config.with_context(ctx).sudo().write({
'journal_id': default_journals['journal_id'],
'invoice_journal_id': default_journals['invoice_journal_id']})
# define some cash journal if no payment method exists
if not pos_config.journal_ids:
Journal = self.env['account.journal']
journals = Journal.with_context(ctx).search([('journal_user', '=', True), ('type', '=', 'cash'), ('company_id', '=', pos_config.company_id.id)])
if not journals:
journals = Journal.with_context(ctx).search([('type', '=', 'cash'), ('company_id', '=', pos_config.company_id.id)])
if not journals:
journals = Journal.with_context(ctx).search([('journal_user', '=', True), ('company_id', '=', pos_config.company_id.id)])
if not journals:
raise ValidationError(_("No payment method configured! \nEither no Chart of Account is installed or no payment method is configured for this POS."))
journals.sudo().write({'journal_user': True})
pos_config.sudo().write({'journal_ids': [(6, 0, journals.ids)]})
pos_name = self.env['ir.sequence'].with_context(ctx).next_by_code('pos.session')
if values.get('name'):
pos_name += ' ' + values['name']
statements = []
ABS = self.env['account.bank.statement']
uid = SUPERUSER_ID if self.env.user.has_group('point_of_sale.group_pos_user') else self.env.user.id
for journal in pos_config.journal_ids:
# set the journal_id which should be used by
# account.bank.statement to set the opening balance of the
# newly created bank statement
ctx['journal_id'] = journal.id if pos_config.cash_control and journal.type == 'cash' else False
st_values = {
'journal_id': journal.id,
'user_id': self.env.user.id,
'name': pos_name
}
statements.append(ABS.with_context(ctx).sudo(uid).create(st_values).id)
values.update({
'name': pos_name,
'statement_ids': [(6, 0, statements)],
'config_id': config_id
})
res = super(PosSession, self.with_context(ctx).sudo(uid)).create(values)
if not pos_config.cash_control:
res.action_pos_session_open()
return res
@api.multi
def unlink(self):
for session in self.filtered(lambda s: s.statement_ids):
session.statement_ids.unlink()
return super(PosSession, self).unlink()
@api.multi
def login(self):
self.ensure_one()
self.write({
'login_number': self.login_number + 1,
})
@api.multi
def action_pos_session_open(self):
# second browse because we need to refetch the data from the DB for cash_register_id
# we only open sessions that haven't already been opened
for session in self.filtered(lambda session: session.state == 'opening_control'):
values = {}
if not session.start_at:
values['start_at'] = fields.Datetime.now()
values['state'] = 'opened'
session.write(values)
session.statement_ids.button_open()
return True
@api.multi
def action_pos_session_closing_control(self):
self._check_pos_session_balance()
for session in self:
session.write({'state': 'closing_control', 'stop_at': fields.Datetime.now()})
if not session.config_id.cash_control:
session.action_pos_session_close()
@api.multi
def _check_pos_session_balance(self):
for session in self:
for statement in session.statement_ids:
if (statement != session.cash_register_id) and (statement.balance_end != statement.balance_end_real):
statement.write({'balance_end_real': statement.balance_end})
@api.multi
def action_pos_session_validate(self):
self._check_pos_session_balance()
self.action_pos_session_close()
@api.multi
def action_pos_session_close(self):
# Close CashBox
for session in self:
company_id = session.config_id.company_id.id
ctx = dict(self.env.context, force_company=company_id, company_id=company_id, default_partner_type='customer')
for st in session.statement_ids:
if abs(st.difference) > st.journal_id.amount_authorized_diff:
# The pos manager can close statements with maximums.
if not self.user_has_groups("point_of_sale.group_pos_manager"):
raise UserError(_("Your ending balance is too different from the theoretical cash closing (%.2f), the maximum allowed is: %.2f. You can contact your manager to force it.") % (st.difference, st.journal_id.amount_authorized_diff))
if (st.journal_id.type not in ['bank', 'cash']):
raise UserError(_("The journal type for your payment method should be bank or cash."))
st.with_context(ctx).sudo().button_confirm_bank()
session.activity_unlink(['point_of_sale.mail_activity_old_session'])
self.with_context(ctx)._confirm_orders()
self.write({'state': 'closed'})
return {
'type': 'ir.actions.client',
'name': 'Point of Sale Menu',
'tag': 'reload',
'params': {'menu_id': self.env.ref('point_of_sale.menu_point_root').id},
}
@api.multi
def open_frontend_cb(self):
    """Open the PoS web frontend, refusing sessions owned by other users.

    :raises UserError: if any selected session belongs to another user.
    :returns: an act_url action pointing at the PoS web client, or an
        empty dict when called on an empty recordset.
    """
    if not self.ids:
        return {}
    foreign_sessions = self.filtered(lambda s: s.user_id.id != self.env.uid)
    if foreign_sessions:
        raise UserError(_("You cannot use the session of another user. This session is owned by %s. "
                          "Please first close this one to use this point of sale.") % foreign_sessions[0].user_id.name)
    return {
        'type': 'ir.actions.act_url',
        'target': 'self',
        'url': '/pos/web/',
    }
@api.multi
def open_cashbox(self):
    """Return an act_window opening the start/end cashbox wizard.

    The wizard target (opening vs. closing cashbox) is selected through
    the 'balance' key of the calling context ('start' by default).
    """
    self.ensure_one()
    context = dict(self._context)
    balance_type = context.get('balance') or 'start'
    context['bank_statement_id'] = self.cash_register_id.id
    context['balance'] = balance_type
    context['default_pos_id'] = self.config_id.id

    action = {
        'name': _('Cash Control'),
        'view_type': 'form',
        'view_mode': 'form',
        'res_model': 'account.bank.statement.cashbox',
        'view_id': self.env.ref('account.view_account_bnk_stmt_cashbox').id,
        'type': 'ir.actions.act_window',
        'context': context,
        'target': 'new'
    }

    # pick the cashbox matching the requested balance side
    register = self.cash_register_id
    cashbox = register.cashbox_start_id if balance_type == 'start' else register.cashbox_end_id
    if cashbox.id:
        action['res_id'] = cashbox.id
    return action
@api.model
def _alert_old_session(self):
    """Schedule a 'close me' activity on sessions open for over a week."""
    # If the session is open for more than one week,
    # log a next activity to close the session.
    sessions = self.search([('start_at', '<=', (fields.datetime.now() - timedelta(days=7))), ('state', '!=', 'closed')])
    for session in sessions:
        # avoid duplicate reminders: only schedule when no activity is
        # already attached to this pos.session record
        if self.env['mail.activity'].search_count([('res_id', '=', session.id), ('res_model', '=', 'pos.session')]) == 0:
            session.activity_schedule('point_of_sale.mail_activity_old_session',
                user_id=session.user_id.id, note=_("Your PoS Session is open since ") + fields.Date.to_string(session.start_at)
                + _(", we advise you to close it and to create a new one."))
class ProcurementGroup(models.Model):
    """Hook the old-session reminder into the stock scheduler run."""

    _inherit = 'procurement.group'

    @api.model
    def _run_scheduler_tasks(self, use_new_cursor=False, company_id=False):
        # run the standard scheduler tasks, then piggy-back the PoS
        # "session open too long" check on the same cron pass
        super(ProcurementGroup, self)._run_scheduler_tasks(use_new_cursor=use_new_cursor, company_id=company_id)
        self.env['pos.session']._alert_old_session()
        if use_new_cursor:
            self.env.cr.commit()
| gpl-3.0 |
mulkieran/blivet | blivet/formats/__init__.py | 2 | 19838 |
# __init__.py
# Entry point for anaconda storage formats subpackage.
#
# Copyright (C) 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Dave Lehman <dlehman@redhat.com>
#
import os
import importlib
from gi.repository import BlockDev as blockdev
from ..util import notify_kernel
from ..util import get_sysfs_path_by_name
from ..util import run_program
from ..util import ObjectID
from ..storage_log import log_method_call
from ..errors import DeviceFormatError, FormatCreateError, FormatDestroyError, FormatSetupError
from ..i18n import N_
from ..size import Size
import logging
log = logging.getLogger("blivet")
# Registry mapping a format's _type string to its DeviceFormat subclass.
device_formats = {}

def register_device_format(fmt_class):
    """Add *fmt_class* to the device_formats registry.

    :param fmt_class: a :class:`DeviceFormat` subclass
    :raises ValueError: if fmt_class is not a DeviceFormat subclass
    """
    if not issubclass(fmt_class, DeviceFormat):
        raise ValueError("arg1 must be a subclass of DeviceFormat")

    device_formats[fmt_class._type] = fmt_class
    log.debug("registered device format class %s as %s", fmt_class.__name__,
              fmt_class._type)
# Filesystem types tried, in preference order, when choosing a default.
default_fstypes = ("ext4", "ext3", "ext2")

def get_default_filesystem_type():
    """Return the first entry of default_fstypes that is supported.

    :raises DeviceFormatError: if no candidate is supported
    """
    for candidate in default_fstypes:
        fmt_class = get_device_format_class(candidate)
        # an unknown type yields None here, which simply means unsupported
        if getattr(fmt_class, "supported", None):
            return candidate

    raise DeviceFormatError("None of %s is supported by your kernel" % ",".join(default_fstypes))
def getFormat(fmt_type, *args, **kwargs):
    """ Return an instance of the appropriate DeviceFormat class.

        :param fmt_type: The name of the formatting type
        :type fmt_type: str.
        :return: the format instance
        :rtype: :class:`DeviceFormat`
        :raises: ValueError

        .. note::

            Any additional arguments will be passed on to the constructor for
            the format class. See the various :class:`DeviceFormat` subclasses
            for an exhaustive list of the arguments that can be passed.
    """
    format_class = get_device_format_class(fmt_type) or DeviceFormat
    instance = format_class(*args, **kwargs)

    # this allows us to store the given type for formats we implement as
    # DeviceFormat.
    if fmt_type and instance.type is None:
        # unknown type, but we can set the name of the format
        # this should add/set an instance attribute
        instance._name = fmt_type

    log.debug("getFormat('%s') returning %s instance with object id %d",
              fmt_type, instance.__class__.__name__, instance.id)
    return instance
def collect_device_format_classes():
    """ Pick up all device format classes from this directory.

        .. note::

            Modules must call :func:`register_device_format` to make format
            classes available to :func:`getFormat`.
    """
    mydir = os.path.dirname(__file__)
    myfile = os.path.basename(__file__)
    (myfile_name, _ext) = os.path.splitext(myfile)

    for module_file in os.listdir(mydir):
        (mod_name, ext) = os.path.splitext(module_file)
        # import every sibling .py module except this __init__ and hidden files
        if ext == ".py" and mod_name != myfile_name and not mod_name.startswith("."):
            try:
                # expose the imported module as an attribute of this package
                globals()[mod_name] = importlib.import_module("."+mod_name, package=__package__)
            except ImportError:
                # a broken format module must not break the whole package
                log.error("import of device format module '%s' failed", mod_name)
                from traceback import format_exc
                log.debug("%s", format_exc())
def get_device_format_class(fmt_type):
    """ Return an appropriate format class.

        :param fmt_type: The name of the format type.
        :type fmt_type: str.
        :returns: The chosen DeviceFormat class
        :rtype: class.

        Returns None if no class is found for fmt_type.
    """
    if not device_formats:
        # lazily populate the registry on first lookup
        collect_device_format_classes()

    # fast path: fmt_type is a registered _type key
    match = device_formats.get(fmt_type)
    if match:
        return match

    # slow path: match against class display names and udev type strings
    for candidate in device_formats.values():
        if fmt_type and fmt_type == candidate._name:
            return candidate
        if fmt_type in candidate._udevTypes:
            return candidate

    return None
class DeviceFormat(ObjectID):
    """ Generic device format.

        This represents the absence of recognized formatting. That could mean a
        device is uninitialized, has had zeros written to it, or contains some
        valid formatting that this module does not support.
    """
    _type = None                        # registry key; None = unrecognized
    _name = N_("Unknown")               # human-readable format name
    _udevTypes = []                     # udev ID_FS_TYPE values for this format
    partedFlag = None                   # parted partition flag, if any
    partedSystem = None                 # parted filesystem type, if any
    _formattable = False                # can be formatted
    _supported = False                  # is supported
    _linuxNative = False                # for clearpart
    _packages = []                      # required packages
    _resizable = False                  # can be resized
    _maxSize = Size(0)                  # maximum size
    _minSize = Size(0)                  # minimum size
    _dump = False                       # dumped by dump(8)?
    _check = False                      # checked on boot?
    _hidden = False                     # hide devices with this formatting?
    _ksMountpoint = None                # mountpoint string for kickstart output

    def __init__(self, **kwargs):
        """
            :keyword device: The path to the device node.
            :type device: str
            :keyword uuid: the formatting's UUID.
            :type uuid: str
            :keyword exists: Whether the formatting exists. (default: False)
            :raises: ValueError

            .. note::

                The 'device' kwarg is required for existing formats. For non-
                existent formats, it is only necessary that the :attr:`device`
                attribute be set before the :meth:`create` method runs. Note
                that you can specify the device at the last moment by specifying
                it via the 'device' kwarg to the :meth:`create` method.
        """
        ObjectID.__init__(self)
        self._label = None
        self._options = None
        self._device = None

        self.device = kwargs.get("device")
        self.uuid = kwargs.get("uuid")
        self.exists = kwargs.get("exists", False)
        self.options = kwargs.get("options")

    def __repr__(self):
        # multi-line debugging summary of the interesting attributes
        s = ("%(classname)s instance (%(id)s) object id %(object_id)d--\n"
             " type = %(type)s name = %(name)s status = %(status)s\n"
             " device = %(device)s uuid = %(uuid)s exists = %(exists)s\n"
             " options = %(options)s supported = %(supported)s"
             " formattable = %(format)s resizable = %(resize)s\n" %
             {"classname": self.__class__.__name__, "id": "%#x" % id(self),
              "object_id": self.id,
              "type": self.type, "name": self.name, "status": self.status,
              "device": self.device, "uuid": self.uuid, "exists": self.exists,
              "options": self.options, "supported": self.supported,
              "format": self.formattable, "resize": self.resizable})
        return s

    @property
    def _existence_str(self):
        # used by __str__: "existing" vs "non-existent"
        return "existing" if self.exists else "non-existent"

    @property
    def desc(self):
        """ Short description of this format (its type name). """
        return str(self.type)

    def __str__(self):
        return "%s %s" % (self._existence_str, self.desc)

    @property
    def dict(self):
        """ Dict of this format's main attributes. """
        d = {"type": self.type, "name": self.name, "device": self.device,
             "uuid": self.uuid, "exists": self.exists,
             "options": self.options, "supported": self.supported,
             "resizable": self.resizable}
        return d

    @classmethod
    def labeling(cls):
        """Returns False by default since most formats are non-labeling."""
        return False

    @classmethod
    def labelFormatOK(cls, label):
        """Checks whether the format of the label is OK for whatever
           application is used by blivet to write a label for this format.
           If there is no application that blivet uses to write a label,
           then no format is acceptable, so must return False.

           :param str label: The label to be checked
           :rtype: bool
           :return: True if the format of the label is OK, otherwise False
        """
        # pylint: disable=unused-argument
        return cls.labeling()

    def _setLabel(self, label):
        """Sets the label for this format.

           :param label: the label for this format
           :type label: str or None

           None means no label specified, or in other words, accept the default
           label that the filesystem app may set. Once the device exists the
           label should not be None, as the device must then have some label
           even if just the empty label.

           "" means the empty label, i.e., no label.

           Some filesystems, even though they do not have a
           labeling application may be already labeled, so we allow to set
           the label of a filesystem even if a labeling application does not
           exist. This can happen with the install media, for example, where
           the filesystem on the CD has a label, but there is no labeling
           application for the Iso9660FS format.

           If a labeling application does exist, the label is not
           required to have the correct format for that application.
           The allowable format for the label may be more permissive than
           the format allowed by the labeling application.

           This method is not intended to be overridden.
        """
        self._label = label

    def _getLabel(self):
        """The label for this filesystem.

           :return: the label for this device
           :rtype: str

           This method is not intended to be overridden.
        """
        return self._label

    def _setOptions(self, options):
        self._options = options

    def _getOptions(self):
        return self._options

    options = property(
        lambda s: s._getOptions(),
        lambda s,v: s._setOptions(v),
        doc="fstab entry option string"
    )

    def _deviceCheck(self, devspec):
        """ Verifies that device spec has a proper format.

            :param devspec: the device spec
            :type devspec: str or NoneType
            :rtype: str or NoneType
            :returns: an explanatory message if devspec fails check, else None
        """
        if devspec and not devspec.startswith("/"):
            return "device must be a fully qualified path"
        return None

    def _setDevice(self, devspec):
        # validate before storing; raises on malformed device paths
        error_msg = self._deviceCheck(devspec)
        if error_msg:
            raise ValueError(error_msg)
        self._device = devspec

    def _getDevice(self):
        return self._device

    device = property(lambda f: f._getDevice(),
                      lambda f,d: f._setDevice(d),
                      doc="Full path the device this format occupies")

    @property
    def name(self):
        """ Display name for this format (falls back to the type). """
        return self._name or self.type

    @property
    def type(self):
        """ The registered type string for this format class. """
        return self._type

    def notifyKernel(self):
        """ Send a 'change' uevent so the kernel re-reads the device. """
        log_method_call(self, device=self.device,
                        type=self.type)
        if not self.device:
            return

        # resolve dm/md aliases to the real kernel node name first
        if self.device.startswith("/dev/mapper/"):
            try:
                name = blockdev.dm.node_from_name(os.path.basename(self.device))
            except blockdev.DMError:
                log.warning("failed to get dm node for %s", self.device)
                return
        elif self.device.startswith("/dev/md/"):
            try:
                name = blockdev.md.node_from_name(os.path.basename(self.device))
            except blockdev.MDRaidError:
                log.warning("failed to get md node for %s", self.device)
                return
        else:
            name = self.device

        path = get_sysfs_path_by_name(name)
        try:
            notify_kernel(path, action="change")
        except (ValueError, IOError) as e:
            log.warning("failed to notify kernel of change: %s", e)

    def create(self, **kwargs):
        """ Write the formatting to the specified block device.

            :keyword device: path to device node
            :type device: str.
            :raises: FormatCreateError
            :returns: None.

            .. :note::

                If a device node path is passed to this method it will overwrite
                any previously set value of this instance's "device" attribute.
        """
        log_method_call(self, device=self.device,
                        type=self.type, status=self.status)
        self._preCreate(**kwargs)
        self._create(**kwargs)
        self._postCreate(**kwargs)

    def _preCreate(self, **kwargs):
        """ Perform checks and setup prior to creating the format. """
        # allow late specification of device path
        device = kwargs.get("device")
        if device:
            self.device = device

        if not os.path.exists(self.device):
            raise FormatCreateError("invalid device specification", self.device)

        if self.exists:
            raise DeviceFormatError("format already exists")

        if self.status:
            raise DeviceFormatError("device exists and is active")

    # pylint: disable=unused-argument
    def _create(self, **kwargs):
        """ Type-specific create method. """
        pass

    # pylint: disable=unused-argument
    def _postCreate(self, **kwargs):
        # the format now exists on disk; let the kernel re-read the device
        self.exists = True
        self.notifyKernel()

    def destroy(self, **kwargs):
        """ Remove the formatting from the associated block device.

            :raises: FormatDestroyError
            :returns: None.
        """
        log_method_call(self, device=self.device,
                        type=self.type, status=self.status)
        self._preDestroy(**kwargs)
        self._destroy(**kwargs)
        self._postDestroy(**kwargs)

    # pylint: disable=unused-argument
    def _preDestroy(self, **kwargs):
        if not self.exists:
            raise DeviceFormatError("format has not been created")

        if self.status:
            raise DeviceFormatError("device is active")

        if not os.access(self.device, os.W_OK):
            raise DeviceFormatError("device path does not exist or is not writable")

    def _destroy(self, **kwargs):
        # wipe existing signatures with wipefs; both an OSError and a
        # non-zero exit code are surfaced as FormatDestroyError
        rc = 0
        err = ""
        try:
            rc = run_program(["wipefs", "-f", "-a", self.device])
        except OSError as e:
            err = str(e)
        else:
            if rc:
                err = str(rc)

        if err:
            msg = "error wiping old signatures from %s: %s" % (self.device, err)
            raise FormatDestroyError(msg)

    def _postDestroy(self, **kwargs):
        self.exists = False
        self.notifyKernel()

    def setup(self, **kwargs):
        """ Activate the formatting.

            :keyword device: device node path
            :type device: str.
            :raises: FormatSetupError.
            :returns: None.

            .. :note::

                If a device node path is passed to this method it will overwrite
                any previously set value of this instance's "device" attribute.
        """
        log_method_call(self, device=self.device,
                        type=self.type, status=self.status)
        if not self._preSetup(**kwargs):
            return

        self._setup(**kwargs)
        self._postSetup(**kwargs)

    def _preSetup(self, **kwargs):
        """ Return True if setup should proceed. """
        if not self.exists:
            raise FormatSetupError("format has not been created")

        # allow late specification of device path
        device = kwargs.get("device")
        if device:
            self.device = device

        if not self.device or not os.path.exists(self.device):
            raise FormatSetupError("invalid device specification")

        # no-op if the format is already active
        return not self.status

    # pylint: disable=unused-argument
    def _setup(self, **kwargs):
        pass

    # pylint: disable=unused-argument
    def _postSetup(self, **kwargs):
        pass

    def teardown(self, **kwargs):
        """ Deactivate the formatting. """
        log_method_call(self, device=self.device,
                        type=self.type, status=self.status)
        if not self._preTeardown(**kwargs):
            return

        self._teardown(**kwargs)
        self._postTeardown(**kwargs)

    def _preTeardown(self, **kwargs):
        """ Return True if teardown should proceed. """
        if not self.exists:
            raise DeviceFormatError("format has not been created")

        # no-op if the format is not currently active
        return self.status

    def _teardown(self, **kwargs):
        pass

    def _postTeardown(self, **kwargs):
        pass

    @property
    def status(self):
        # a format counts as active only on an existing device node;
        # the generic DeviceFormat itself never reports active
        return (self.exists and
                self.__class__ is not DeviceFormat and
                isinstance(self.device, str) and
                self.device and
                os.path.exists(self.device))

    @property
    def formattable(self):
        """ Can we create formats of this type? """
        return self._formattable

    @property
    def supported(self):
        """ Is this format a supported type?

            Are the necessary external applications required by the
            functionality that this format provides actually provided by
            the environment in which blivet is running?
        """
        return self._supported

    @property
    def packages(self):
        """ Packages required to manage formats of this type. """
        return self._packages

    @property
    def resizable(self):
        """ Can formats of this type be resized? """
        return self._resizable and self.exists

    @property
    def linuxNative(self):
        """ Is this format type native to linux? """
        return self._linuxNative

    @property
    def mountable(self):
        """ Is this something we can mount? """
        return False

    @property
    def dump(self):
        """ Whether or not this format will be dumped by dump(8). """
        return self._dump

    @property
    def check(self):
        """ Whether or not this format is checked on boot. """
        return self._check

    @property
    def maxSize(self):
        """ Maximum size for this format type. """
        return self._maxSize

    @property
    def minSize(self):
        """ Minimum size for this format instance.

            :returns: the minimum size for this format instance
            :rtype: :class:`~.size.Size`

            A value of 0 indicates an unknown size.
        """
        return self._minSize

    @property
    def hidden(self):
        """ Whether devices with this formatting should be hidden in UIs. """
        return self._hidden

    @property
    def ksMountpoint(self):
        """ The mountpoint string to use in kickstart output. """
        return (self._ksMountpoint or self.type or "")

    def populateKSData(self, data):
        """ Fill in the kickstart data object for this format. """
        data.format = not self.exists
        data.fstype = self.type
        data.mountpoint = self.ksMountpoint
# Register the generic fallback format, then import all specialized
# format modules so getFormat() can find every registered class.
register_device_format(DeviceFormat)
collect_device_format_classes()
| gpl-2.0 |
wq/django-data-wizard | tests/test_naturalkey.py | 1 | 4915 | from .base import BaseImportTestCase
from tests.naturalkey_app.models import Place, Event
class NaturalKeyTestCase(BaseImportTestCase):
    """Import-wizard tests covering natural-key (Place) resolution,
    both via manual column/identifier mapping and via preset identifiers."""

    serializer_name = 'tests.naturalkey_app.wizard.NoteSerializer'

    def test_manual(self):
        run = self.upload_file('naturalkey.csv')

        # Inspect unmatched columns and select choices
        self.check_columns(run, 3, 2)
        self.update_columns(run, {
            'Note': {
                'date': 'event[date]',
                'place': 'event[place][name]',
            }
        })

        # Start data import process, wait for completion
        self.start_import(run, [])

        # Verify results
        self.check_data(run)
        self.assert_log(run, [
            'created',
            'parse_columns',
            'update_columns',
            'do_import',
            'import_complete',
        ])

    def test_auto(self):
        # Map columns but not identifiers
        self.create_identifier('date', 'event[date]')
        self.create_identifier('place', 'event[place][name]')

        # Should abort due to unknown place identifiers
        run = self.upload_file('naturalkey.csv')
        self.auto_import(run, expect_input_required=True)
        self.assert_log(run, [
            'created',
            'auto_import',
            'parse_columns',
            'parse_row_identifiers',
        ])

    def test_auto_preset(self):
        mpls = Place.objects.find('mpls')
        chi = Place.objects.find('chi')

        # Map columns and place identifiers
        self.create_identifier('date', 'event[date]')
        self.create_identifier('place', 'event[place][name]')
        self.create_identifier('Minneapolis', 'event[place][name]', 'mpls')
        self.create_identifier('Chicago', 'event[place][name]', 'chi')

        # Should succeed since all identifers are mapped
        run = self.upload_file('naturalkey.csv')
        self.auto_import(run, expect_input_required=False)

        # Verify results
        self.check_data(run, extra_ranges=[
            "Cell value 'Minneapolis -> event[place][name]=mpls'"
            " at Rows 1-2, Column 0",
            "Cell value 'Chicago -> event[place][name]=chi'"
            " at Rows 3-4, Column 0",
        ])
        self.assert_log(run, [
            'created',
            'auto_import',
            'parse_columns',
            'parse_row_identifiers',
            'do_import',
            'import_complete',
        ])
        for record in run.record_set.all():
            self.assertIn(record.content_object.event.place, [mpls, chi])

    def test_manual_identifiers(self):
        # Create one place
        Place.objects.find('mpls')

        # Inspect unmatched columns and select choices
        run = self.upload_file('naturalkey.csv')
        self.check_columns(run, 3, 2)
        self.update_columns(run, {
            'Note': {
                'date': 'event[date]',
                'place': 'event[place][name]',
            }
        })

        # Match places (1 existing, 1 new)
        self.check_row_identifiers(run, 2, 2)
        self.update_row_identifiers(run, {
            'naturalkey_app.place': {
                'Minneapolis': 'mpls',
                'Chicago': 'new',
            }
        })

        # "new" place is not created until actual import
        self.assertEqual(Place.objects.count(), 1)

        # Start data import process, wait for completion
        self.start_import(run, [])

        # Verify results
        self.check_data(run, extra_ranges=[
            "Cell value 'Minneapolis -> event[place][name]=mpls'"
            " at Rows 1-2, Column 0",
            "Cell value 'Chicago -> event[place][name]=Chicago'"
            " at Rows 3-4, Column 0",
        ])
        self.assert_log(run, [
            'created',
            'parse_columns',
            'update_columns',
            'parse_row_identifiers',
            'update_row_identifiers',
            'do_import',
            'import_complete',
        ])

    # NOTE(review): extra_ranges=[] is a mutable default argument; it is
    # safe here only because the list is read (concatenated), never mutated.
    def check_data(self, run, extra_ranges=[]):
        """Shared assertions for a successful naturalkey.csv import."""
        self.assertEqual(Event.objects.count(), 3)
        self.assertEqual(Place.objects.count(), 2)
        mpls, chi = Place.objects.order_by('pk')

        self.assert_status(run, 4)
        self.assert_ranges(run, [
            "Data Column 'place -> event[place][name]' at Rows 1-4, Column 0",
            "Data Column 'date -> event[date]' at Rows 1-4, Column 1",
            "Data Column 'note -> note' at Rows 1-4, Column 2",
        ] + extra_ranges)
        self.assert_records(run, [
            "Imported '%s on 2017-06-01: Test Note 1' at row 1" % mpls.name,
            "Imported '%s on 2017-06-01: Test Note 2' at row 2" % mpls.name,
            "Imported '%s on 2017-06-01: Test Note 3' at row 3" % chi.name,
            "Imported '%s on 2017-06-02: Test Note 4' at row 4" % chi.name,
        ])
        self.assert_urls(run, 'notes/%s')
| mit |
kdebrab/pandas | pandas/tests/tseries/test_holiday.py | 16 | 16104 | import pytest
from datetime import datetime
import pandas.util.testing as tm
from pandas import compat
from pandas import DatetimeIndex
from pandas.tseries.holiday import (USFederalHolidayCalendar, USMemorialDay,
USThanksgivingDay, nearest_workday,
next_monday_or_tuesday, next_monday,
previous_friday, sunday_to_monday, Holiday,
DateOffset, MO, SA, Timestamp,
AbstractHolidayCalendar, get_calendar,
HolidayCalendarFactory, next_workday,
previous_workday, before_nearest_workday,
EasterMonday, GoodFriday,
after_nearest_workday, weekend_to_monday,
USLaborDay, USColumbusDay,
USMartinLutherKingJr, USPresidentsDay)
from pytz import utc
class TestCalendar(object):
    """Tests of AbstractHolidayCalendar subclasses (construction, caching,
    observance dates, rule lookup)."""

    def setup_method(self, method):
        # expected 2012 US federal holidays (observed dates)
        self.holiday_list = [
            datetime(2012, 1, 2),
            datetime(2012, 1, 16),
            datetime(2012, 2, 20),
            datetime(2012, 5, 28),
            datetime(2012, 7, 4),
            datetime(2012, 9, 3),
            datetime(2012, 10, 8),
            datetime(2012, 11, 12),
            datetime(2012, 11, 22),
            datetime(2012, 12, 25)]

        self.start_date = datetime(2012, 1, 1)
        self.end_date = datetime(2012, 12, 31)

    def test_calendar(self):
        calendar = USFederalHolidayCalendar()

        # the same range expressed as datetime, str and Timestamp must agree
        holidays = calendar.holidays(self.start_date, self.end_date)
        holidays_1 = calendar.holidays(
            self.start_date.strftime('%Y-%m-%d'),
            self.end_date.strftime('%Y-%m-%d'))
        holidays_2 = calendar.holidays(
            Timestamp(self.start_date),
            Timestamp(self.end_date))

        assert list(holidays.to_pydatetime()) == self.holiday_list
        assert list(holidays_1.to_pydatetime()) == self.holiday_list
        assert list(holidays_2.to_pydatetime()) == self.holiday_list

    def test_calendar_caching(self):
        # Test for issue #9552

        # local class deliberately shadows the module-level TestCalendar
        class TestCalendar(AbstractHolidayCalendar):

            def __init__(self, name=None, rules=None):
                super(TestCalendar, self).__init__(name=name, rules=rules)

        jan1 = TestCalendar(rules=[Holiday('jan1', year=2015, month=1, day=1)])
        jan2 = TestCalendar(rules=[Holiday('jan2', year=2015, month=1, day=2)])

        # instances with different rules must not share cached holidays
        tm.assert_index_equal(jan1.holidays(), DatetimeIndex(['01-Jan-2015']))
        tm.assert_index_equal(jan2.holidays(), DatetimeIndex(['02-Jan-2015']))

    def test_calendar_observance_dates(self):
        # Test for issue 11477
        USFedCal = get_calendar('USFederalHolidayCalendar')
        holidays0 = USFedCal.holidays(datetime(2015, 7, 3), datetime(
            2015, 7, 3))  # <-- same start and end dates
        holidays1 = USFedCal.holidays(datetime(2015, 7, 3), datetime(
            2015, 7, 6))  # <-- different start and end dates
        holidays2 = USFedCal.holidays(datetime(2015, 7, 3), datetime(
            2015, 7, 3))  # <-- same start and end dates

        tm.assert_index_equal(holidays0, holidays1)
        tm.assert_index_equal(holidays0, holidays2)

    def test_rule_from_name(self):
        USFedCal = get_calendar('USFederalHolidayCalendar')
        assert USFedCal.rule_from_name('Thanksgiving') == USThanksgivingDay
class TestHoliday(object):
    """Tests of individual Holiday rules: date generation, timezone
    preservation, observance windows and the calendar factory."""

    def setup_method(self, method):
        self.start_date = datetime(2011, 1, 1)
        self.end_date = datetime(2020, 12, 31)

    def check_results(self, holiday, start, end, expected):
        """Assert holiday.dates(start, end) == expected, naive and tz-aware."""
        assert list(holiday.dates(start, end)) == expected

        # Verify that timezone info is preserved.
        assert (list(holiday.dates(utc.localize(Timestamp(start)),
                                   utc.localize(Timestamp(end)))) ==
                [utc.localize(dt) for dt in expected])

    def test_usmemorialday(self):
        self.check_results(holiday=USMemorialDay,
                           start=self.start_date,
                           end=self.end_date,
                           expected=[
                               datetime(2011, 5, 30),
                               datetime(2012, 5, 28),
                               datetime(2013, 5, 27),
                               datetime(2014, 5, 26),
                               datetime(2015, 5, 25),
                               datetime(2016, 5, 30),
                               datetime(2017, 5, 29),
                               datetime(2018, 5, 28),
                               datetime(2019, 5, 27),
                               datetime(2020, 5, 25),
                           ], )

    def test_non_observed_holiday(self):

        self.check_results(
            Holiday('July 4th Eve', month=7, day=3),
            start="2001-01-01",
            end="2003-03-03",
            expected=[
                Timestamp('2001-07-03 00:00:00'),
                Timestamp('2002-07-03 00:00:00')
            ]
        )

        self.check_results(
            Holiday('July 4th Eve', month=7, day=3, days_of_week=(0, 1, 2, 3)),
            start="2001-01-01",
            end="2008-03-03",
            expected=[
                Timestamp('2001-07-03 00:00:00'),
                Timestamp('2002-07-03 00:00:00'),
                Timestamp('2003-07-03 00:00:00'),
                Timestamp('2006-07-03 00:00:00'),
                Timestamp('2007-07-03 00:00:00'),
            ]
        )

    def test_easter(self):

        self.check_results(EasterMonday,
                           start=self.start_date,
                           end=self.end_date,
                           expected=[
                               Timestamp('2011-04-25 00:00:00'),
                               Timestamp('2012-04-09 00:00:00'),
                               Timestamp('2013-04-01 00:00:00'),
                               Timestamp('2014-04-21 00:00:00'),
                               Timestamp('2015-04-06 00:00:00'),
                               Timestamp('2016-03-28 00:00:00'),
                               Timestamp('2017-04-17 00:00:00'),
                               Timestamp('2018-04-02 00:00:00'),
                               Timestamp('2019-04-22 00:00:00'),
                               Timestamp('2020-04-13 00:00:00'),
                           ], )
        self.check_results(GoodFriday,
                           start=self.start_date,
                           end=self.end_date,
                           expected=[
                               Timestamp('2011-04-22 00:00:00'),
                               Timestamp('2012-04-06 00:00:00'),
                               Timestamp('2013-03-29 00:00:00'),
                               Timestamp('2014-04-18 00:00:00'),
                               Timestamp('2015-04-03 00:00:00'),
                               Timestamp('2016-03-25 00:00:00'),
                               Timestamp('2017-04-14 00:00:00'),
                               Timestamp('2018-03-30 00:00:00'),
                               Timestamp('2019-04-19 00:00:00'),
                               Timestamp('2020-04-10 00:00:00'),
                           ], )

    def test_usthanksgivingday(self):

        self.check_results(USThanksgivingDay,
                           start=self.start_date,
                           end=self.end_date,
                           expected=[
                               datetime(2011, 11, 24),
                               datetime(2012, 11, 22),
                               datetime(2013, 11, 28),
                               datetime(2014, 11, 27),
                               datetime(2015, 11, 26),
                               datetime(2016, 11, 24),
                               datetime(2017, 11, 23),
                               datetime(2018, 11, 22),
                               datetime(2019, 11, 28),
                               datetime(2020, 11, 26),
                           ], )

    def test_holidays_within_dates(self):
        # Fix holiday behavior found in #11477
        # where holiday.dates returned dates outside start/end date
        # or observed rules could not be applied as the holiday
        # was not in the original date range (e.g., 7/4/2015 -> 7/3/2015)
        start_date = datetime(2015, 7, 1)
        end_date = datetime(2015, 7, 1)

        calendar = get_calendar('USFederalHolidayCalendar')
        new_years = calendar.rule_from_name('New Years Day')
        july_4th = calendar.rule_from_name('July 4th')
        veterans_day = calendar.rule_from_name('Veterans Day')
        christmas = calendar.rule_from_name('Christmas')

        # Holiday: (start/end date, holiday)
        holidays = {USMemorialDay: ("2015-05-25", "2015-05-25"),
                    USLaborDay: ("2015-09-07", "2015-09-07"),
                    USColumbusDay: ("2015-10-12", "2015-10-12"),
                    USThanksgivingDay: ("2015-11-26", "2015-11-26"),
                    USMartinLutherKingJr: ("2015-01-19", "2015-01-19"),
                    USPresidentsDay: ("2015-02-16", "2015-02-16"),
                    GoodFriday: ("2015-04-03", "2015-04-03"),
                    EasterMonday: [("2015-04-06", "2015-04-06"),
                                   ("2015-04-05", [])],
                    new_years: [("2015-01-01", "2015-01-01"),
                                ("2011-01-01", []),
                                ("2010-12-31", "2010-12-31")],
                    july_4th: [("2015-07-03", "2015-07-03"),
                               ("2015-07-04", [])],
                    veterans_day: [("2012-11-11", []),
                                   ("2012-11-12", "2012-11-12")],
                    christmas: [("2011-12-25", []),
                                ("2011-12-26", "2011-12-26")]}

        for rule, dates in compat.iteritems(holidays):
            # the fixed 2015-07-01 range contains none of these holidays
            empty_dates = rule.dates(start_date, end_date)
            assert empty_dates.tolist() == []

            if isinstance(dates, tuple):
                dates = [dates]

            for start, expected in dates:
                if len(expected):
                    expected = [Timestamp(expected)]
                self.check_results(rule, start, start, expected)

    def test_argument_types(self):
        # datetime, str and Timestamp endpoints must produce identical results
        holidays = USThanksgivingDay.dates(self.start_date, self.end_date)
        holidays_1 = USThanksgivingDay.dates(
            self.start_date.strftime('%Y-%m-%d'),
            self.end_date.strftime('%Y-%m-%d'))
        holidays_2 = USThanksgivingDay.dates(
            Timestamp(self.start_date),
            Timestamp(self.end_date))

        tm.assert_index_equal(holidays, holidays_1)
        tm.assert_index_equal(holidays, holidays_2)

    def test_special_holidays(self):
        base_date = [datetime(2012, 5, 28)]
        holiday_1 = Holiday('One-Time', year=2012, month=5, day=28)
        holiday_2 = Holiday('Range', month=5, day=28,
                            start_date=datetime(2012, 1, 1),
                            end_date=datetime(2012, 12, 31),
                            offset=DateOffset(weekday=MO(1)))

        assert base_date == holiday_1.dates(self.start_date, self.end_date)
        assert base_date == holiday_2.dates(self.start_date, self.end_date)

    def test_get_calendar(self):
        class TestCalendar(AbstractHolidayCalendar):
            rules = []

        calendar = get_calendar('TestCalendar')
        assert TestCalendar == calendar.__class__

    def test_factory(self):
        class_1 = HolidayCalendarFactory('MemorialDay',
                                         AbstractHolidayCalendar,
                                         USMemorialDay)
        class_2 = HolidayCalendarFactory('Thansksgiving',
                                         AbstractHolidayCalendar,
                                         USThanksgivingDay)
        class_3 = HolidayCalendarFactory('Combined', class_1, class_2)

        assert len(class_1.rules) == 1
        assert len(class_2.rules) == 1
        assert len(class_3.rules) == 2
class TestObservanceRules(object):
    """Tests of the weekday-shifting observance helper functions, using a
    fixed Wednesday-through-Tuesday week in April 2014."""

    def setup_method(self, method):
        self.we = datetime(2014, 4, 9)   # Wednesday
        self.th = datetime(2014, 4, 10)  # Thursday
        self.fr = datetime(2014, 4, 11)  # Friday
        self.sa = datetime(2014, 4, 12)  # Saturday
        self.su = datetime(2014, 4, 13)  # Sunday
        self.mo = datetime(2014, 4, 14)  # Monday
        self.tu = datetime(2014, 4, 15)  # Tuesday

    def test_next_monday(self):
        assert next_monday(self.sa) == self.mo
        assert next_monday(self.su) == self.mo

    def test_next_monday_or_tuesday(self):
        assert next_monday_or_tuesday(self.sa) == self.mo
        assert next_monday_or_tuesday(self.su) == self.tu
        assert next_monday_or_tuesday(self.mo) == self.tu

    def test_previous_friday(self):
        assert previous_friday(self.sa) == self.fr
        assert previous_friday(self.su) == self.fr

    def test_sunday_to_monday(self):
        assert sunday_to_monday(self.su) == self.mo

    def test_nearest_workday(self):
        assert nearest_workday(self.sa) == self.fr
        assert nearest_workday(self.su) == self.mo
        assert nearest_workday(self.mo) == self.mo

    def test_weekend_to_monday(self):
        assert weekend_to_monday(self.sa) == self.mo
        assert weekend_to_monday(self.su) == self.mo
        assert weekend_to_monday(self.mo) == self.mo

    def test_next_workday(self):
        assert next_workday(self.sa) == self.mo
        assert next_workday(self.su) == self.mo
        assert next_workday(self.mo) == self.tu

    def test_previous_workday(self):
        assert previous_workday(self.sa) == self.fr
        assert previous_workday(self.su) == self.fr
        assert previous_workday(self.tu) == self.mo

    def test_before_nearest_workday(self):
        assert before_nearest_workday(self.sa) == self.th
        assert before_nearest_workday(self.su) == self.fr
        assert before_nearest_workday(self.tu) == self.mo

    def test_after_nearest_workday(self):
        assert after_nearest_workday(self.sa) == self.mo
        assert after_nearest_workday(self.su) == self.tu
        assert after_nearest_workday(self.fr) == self.mo
class TestFederalHolidayCalendar(object):
    """Regression tests for rules with restricted start dates / offsets."""

    def test_no_mlk_before_1986(self):
        # see gh-10278
        class MLKCalendar(AbstractHolidayCalendar):
            rules = [USMartinLutherKingJr]

        holidays = MLKCalendar().holidays(start='1984',
                                          end='1988').to_pydatetime().tolist()

        # Testing to make sure holiday is not incorrectly observed before 1986
        assert holidays == [datetime(1986, 1, 20, 0, 0),
                            datetime(1987, 1, 19, 0, 0)]

    def test_memorial_day(self):
        class MemorialDay(AbstractHolidayCalendar):
            rules = [USMemorialDay]

        holidays = MemorialDay().holidays(start='1971',
                                          end='1980').to_pydatetime().tolist()

        # Fixes 5/31 error and checked manually against Wikipedia
        assert holidays == [datetime(1971, 5, 31, 0, 0),
                            datetime(1972, 5, 29, 0, 0),
                            datetime(1973, 5, 28, 0, 0),
                            datetime(1974, 5, 27, 0, 0),
                            datetime(1975, 5, 26, 0, 0),
                            datetime(1976, 5, 31, 0, 0),
                            datetime(1977, 5, 30, 0, 0),
                            datetime(1978, 5, 29, 0, 0),
                            datetime(1979, 5, 28, 0, 0)]
class TestHolidayConflictingArguments(object):
    # A Holiday may be shifted either by an offset or by an observance
    # callable, but not both at once.

    def test_both_offset_observance_raises(self):
        # see gh-10217
        with pytest.raises(NotImplementedError):
            Holiday("Cyber Monday", month=11, day=1,
                    offset=[DateOffset(weekday=SA(4))],
                    observance=next_monday)
| bsd-3-clause |
slagle/ansible-modules-extras | notification/osx_say.py | 161 | 2108 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Michael DeHaan <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module documentation rendered by `ansible-doc`; fixes the grammar in the
# short description ("Makes an OSX computer to speak.") and the "OS computer"
# typo, and normalizes the option descriptions to YAML lists.
DOCUMENTATION = '''
---
module: osx_say
version_added: "1.2"
short_description: Makes an OS X computer speak.
description:
   - Makes an OS X computer speak! Amuse your friends, annoy your coworkers!
notes:
   - If you like this module, you may also be interested in the osx_say callback in the plugins/ directory of the source checkout.
options:
  msg:
    description:
      - What to say.
    required: true
  voice:
    description:
      - What voice to use.
    required: false
requirements: [ say ]
author:
    - "Ansible Core Team"
    - "Michael DeHaan (@mpdehaan)"
'''

EXAMPLES = '''
- local_action: osx_say msg="{{inventory_hostname}} is all done" voice=Zarvox
'''

# Voice used when the task does not specify one.
DEFAULT_VOICE = 'Trinoids'
def say(module, msg, voice):
    """Speak ``msg`` with the given ``voice`` via /usr/bin/say.

    Runs through the module's command runner with check_rc=True so a
    non-zero exit fails the task.
    """
    command = ["/usr/bin/say", msg, "--voice=%s" % (voice)]
    module.run_command(command, check_rc=True)
def main():
    # Entry point: declare the module interface, check that the host actually
    # has the `say` binary, then speak the requested message.
    module = AnsibleModule(
        argument_spec=dict(
            msg=dict(required=True),
            voice=dict(required=False, default=DEFAULT_VOICE),
        ),
        supports_check_mode=False
    )

    # Bail out early on hosts without the say utility (i.e. non-OS X hosts).
    if not os.path.exists("/usr/bin/say"):
        module.fail_json(msg="/usr/bin/say is not installed")

    msg = module.params['msg']
    voice = module.params['voice']

    say(module, msg, voice)

    # Speaking does not mutate any host state, so always report changed=False.
    module.exit_json(msg=msg, changed=False)
# import module snippets
# (Old-style Ansible modules pull AnsibleModule — and, transitively, `os` —
# into the global namespace via this wildcard import; it must stay at the
# bottom of the file, after the definitions it wires up.)
from ansible.module_utils.basic import *

main()
| gpl-3.0 |
signed/intellij-community | python/helpers/py3only/docutils/parsers/rst/directives/admonitions.py | 44 | 2404 | # $Id: admonitions.py 7681 2013-07-12 07:52:27Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Admonition directives.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
class BaseAdmonition(Directive):

    """Shared implementation for all admonition directives."""

    final_argument_whitespace = True
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    node_class = None
    """Subclasses must set this to the appropriate admonition node class."""

    def run(self):
        """Build the admonition node from the directive content and return it."""
        set_classes(self.options)
        self.assert_has_content()
        text = '\n'.join(self.content)
        admonition_node = self.node_class(text, **self.options)
        self.add_name(admonition_node)
        if self.node_class is nodes.admonition:
            # Only the generic "admonition" directive takes an explicit title
            # argument; the specific variants (note, warning, ...) do not.
            title_text = self.arguments[0]
            textnodes, messages = self.state.inline_text(title_text,
                                                         self.lineno)
            title = nodes.title(title_text, '', *textnodes)
            title.source, title.line = (
                self.state_machine.get_source_and_line(self.lineno))
            admonition_node += title
            admonition_node += messages
            # PEP 8 idiom: use "x not in y" rather than "not x in y".
            if 'classes' not in self.options:
                # Derive a CSS class from the title when none was supplied.
                admonition_node['classes'] += ['admonition-' +
                                               nodes.make_id(title_text)]
        self.state.nested_parse(self.content, self.content_offset,
                                admonition_node)
        return [admonition_node]
class Admonition(BaseAdmonition):
    # Generic admonition; unlike the specific variants it requires a title.
    required_arguments = 1
    node_class = nodes.admonition


# The specific admonition directives only differ by the docutils node they
# produce; all behaviour lives in BaseAdmonition.

class Attention(BaseAdmonition):
    node_class = nodes.attention


class Caution(BaseAdmonition):
    node_class = nodes.caution


class Danger(BaseAdmonition):
    node_class = nodes.danger


class Error(BaseAdmonition):
    node_class = nodes.error


class Hint(BaseAdmonition):
    node_class = nodes.hint


class Important(BaseAdmonition):
    node_class = nodes.important


class Note(BaseAdmonition):
    node_class = nodes.note


class Tip(BaseAdmonition):
    node_class = nodes.tip


class Warning(BaseAdmonition):
    node_class = nodes.warning
| apache-2.0 |
hwine/build-relengapi | relengapi/blueprints/archiver/tasks.py | 3 | 3294 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import shutil
import tempfile
from random import randint
import requests
import structlog
from boto.s3.key import Key
from celery.task import current
from flask import current_app
from relengapi.lib import celery
logger = structlog.get_logger()

# Signed S3 GET urls returned by the task expire after 5 minutes.
SIGNED_URL_EXPIRY = 300
# A queued task expires if not picked up within 30 minutes.
TASK_EXPIRY = 1800
# A running task is killed if it exceeds one hour.
TASK_TIME_OUT = 3600
def upload_url_archive_to_s3(key, url, buckets):
    """Stream an archive from ``url`` and upload it to S3 under ``key``.

    The archive is uploaded once per region in ``buckets`` (a mapping of
    region name -> bucket name).

    :returns: tuple ``(s3_urls, status)`` where ``s3_urls`` maps each region
        to a signed GET url (empty when the source could not be fetched) and
        ``status`` is a human readable result message.
    """
    s3_urls = {}
    logger.info('Key to be uploaded to S3: %s - Verifying src_url: %s', key, url)
    resp = requests.get(url, stream=True, timeout=60)
    try:
        resp.raise_for_status()
    except requests.exceptions.HTTPError:
        status = "Could not get a valid response from src_url. Does {} exist?".format(url)
        logger.exception(status)
        resp.close()
        return s3_urls, status

    logger.info('S3 Key: %s - downloading and unpacking archive from src_url', key)

    try:
        # Spool the archive through a temporary file. The context manager
        # guarantees the file is closed (and deleted) even when an upload
        # fails — the original version leaked the file on error.
        with tempfile.TemporaryFile() as tempf:
            # copy the data, block-by-block, into that file
            resp.raw.decode_content = True
            shutil.copyfileobj(resp.raw, tempf)

            # write it out to S3
            for region in buckets:
                s3 = current_app.aws.connect_to('s3', region)
                k = Key(s3.get_bucket(buckets[region]))
                k.key = key
                # Mirror the source's content type and attachment filename.
                k.set_metadata('Content-Type', resp.headers['Content-Type'])
                k.set_metadata('Content-Disposition', resp.headers['Content-Disposition'])
                k.set_contents_from_file(tempf, rewind=True)  # rewind points tempf back to start
                s3_urls[region] = s3.generate_url(expires_in=SIGNED_URL_EXPIRY, method='GET',
                                                  bucket=buckets[region], key=key)
        status = "Task completed! Check 's3_urls' for upload locations."
    finally:
        resp.close()
    return s3_urls, status
@celery.task(bind=True, track_started=True, max_retries=3,
             time_limit=TASK_TIME_OUT, expires=TASK_EXPIRY)
def create_and_upload_archive(self, src_url, key):
    """
    A celery task that downloads an archive if it exists from a src location and attempts to upload
    the archive to a supported bucket in each supported region.

    Throughout this process, update the state of the task and finally return the location of the
    s3 urls if successful.

    expires after 30m if the task hasn't been picked up from the message queue
    task is killed if exceeds time_limit of an hour after it has started
    """
    status = ""
    s3_urls = {}
    buckets = current_app.config['ARCHIVER_S3_BUCKETS']
    try:
        s3_urls, status = upload_url_archive_to_s3(key, src_url, buckets)
    except Exception as exc:
        # set a jitter enabled delay
        # where an aggressive delay would result in: 7s, 49s, and 343s
        # and a gentle delay would result in: 4s, 16s, and 64s
        delay = randint(4, 7) ** (current.request.retries + 1)  # retries == 0 on first attempt
        # NOTE(review): current.retry() is expected to re-raise to reschedule
        # the task, so the return below only runs on success — confirm against
        # the celery version in use.
        current.retry(exc=exc, countdown=delay)
    return {
        'status': status,
        'src_url': src_url,
        's3_urls': s3_urls,
    }
| mpl-2.0 |
petricm/DIRAC | ResourceStatusSystem/PolicySystem/StateMachine.py | 2 | 9578 | """ StateMachine
This module contains the basic blocks to build a state machine ( State and
StateMachine ). And the RSS implementation of it, using its own states map.
"""
from DIRAC import S_OK, S_ERROR, gLogger
__RCSID__ = '$Id$'
class State(object):
    """
    State class that represents a single step on a StateMachine, with all the
    possible transitions, the default transition and an ordering level.

    examples:
      >>> s0 = State( 100 )
      >>> s1 = State( 0, [ 'StateName1', 'StateName2' ], defState = 'StateName1' )
      >>> s2 = State( 0, [ 'StateName1', 'StateName2' ] )

    :param int level: each state is mapped to an integer, which is used to sort
       the states according to that integer.
    :param stateMap: list ( of strings ) with the reachable states from this
       particular status. If not defined, we assume there are no restrictions.
    :type stateMap: python:list
    :param defState: default state used in case the proposed next state is not
       in stateMap ( not defined or simply not there ).
    :type defState: None or str
    """

    def __init__(self, level, stateMap=None, defState=None):
        """
        Constructor.
        """
        self.level = level
        # BUGFIX: the previous signature used a mutable default argument
        # ( stateMap = list() ), which is evaluated once and therefore shared
        # by every State created without an explicit map. Use a None sentinel.
        self.stateMap = stateMap if stateMap is not None else []
        self.default = defState

    def transitionRule(self, nextState):
        """
        Method that selects the next state, knowing the default and the
        transitions map, and the proposed next state. If <nextState> is in
        stateMap, goes there. If not, goes to <self.default> if any.
        Otherwise, goes to <nextState> anyway.

        examples:
          >>> s1.transitionRule( 'StateName2' )
          'StateName2'
          >>> s1.transitionRule( 'StateNameNotInMap' )
          'StateName1'
          >>> s2.transitionRule( 'StateNameNotInMap' )
          'StateNameNotInMap'

        :param str nextState: name of the state in the stateMap
        :return: state name
        :rtype: str
        """
        # If next state is on the list of next states, go ahead.
        if nextState in self.stateMap:
            return nextState
        # Otherwise fall back to the default; states with an empty stateMap
        # and no default have no movement restrictions, so the candidate is
        # kept. ( Simplified from the original "( 1 and self.default ) or
        # nextState", which is equivalent. )
        return self.default or nextState
class StateMachine(object):
    """
    StateMachine class that represents the whole state machine with all
    transitions.

    examples:
      >>> sm0 = StateMachine()
      >>> sm1 = StateMachine( state = 'Active' )

    :param state: current state of the StateMachine, could be None if we do not
        use the StateMachine to calculate transitions. Beware, it is not
        checked if the state is on the states map !
    :type state: None or str
    """

    def __init__(self, state=None):
        """
        Constructor.
        """
        self.state = state
        # To be overwritten by child classes, unless you like Nirvana state that much.
        self.states = {'Nirvana': State(100)}

    def levelOfState(self, state):
        """
        Given a state name, it returns its level ( integer ), which defines the
        hierarchy.

        >>> sm0.levelOfState( 'Nirvana' )
        100
        >>> sm0.levelOfState( 'AnotherState' )
        -1

        :param str state: name of the state, it should be on <self.states> key set
        :return: `int` || -1 ( if not in <self.states> )
        """
        if state not in self.states:
            return -1
        return self.states[state].level

    def setState(self, candidateState):
        """Makes sure the state is either None or known to the machine, and
        that it is a valid state to move into. Final states are also checked.

        examples:
          >>> sm0.setState( None )[ 'OK' ]
          True
          >>> sm0.setState( 'Nirvana' )[ 'OK' ]
          True
          >>> sm0.setState( 'AnotherState' )[ 'OK' ]
          False

        :param candidateState: state which will be set as current state of the StateMachine
        :type candidateState: None or str
        :return: S_OK( newState ) || S_ERROR
        """
        if candidateState == self.state:
            return S_OK(candidateState)

        if candidateState is None:
            # Clearing the machine's state is always allowed.
            self.state = None
            return S_OK(None)

        if candidateState not in self.states:
            return S_ERROR("%s is not a valid state" % candidateState)

        if not self.states[self.state].stateMap:
            gLogger.warn("Final state, won't move")
            return S_OK(self.state)

        # BUGFIX: ``nextState`` was previously only assigned inside the
        # "transition not allowed" branch, so a perfectly valid transition
        # raised UnboundLocalError on ``self.state = nextState``.
        nextState = candidateState
        if candidateState not in self.states[self.state].stateMap:
            gLogger.warn("Can't move from %s to %s, choosing a good one" % (self.state, candidateState))
            result = self.getNextState(candidateState)
            if not result['OK']:
                return result
            nextState = result['Value']

        self.state = nextState
        return S_OK(nextState)

    def getStates(self):
        """
        Returns all possible states in the state map.

        examples:
          >>> sm0.getStates()
          [ 'Nirvana' ]

        :return: list( stateNames )
        """
        return self.states.keys()

    def getNextState(self, candidateState):
        """
        Method that gets the next state, given the proposed transition to
        candidateState. If candidateState is not on the state map
        <self.states>, it is rejected. Otherwise: if <self.state> is None the
        next state is <candidateState>; else the current state applies its own
        transition rule.

        examples:
          >>> sm0.getNextState( None )
          S_OK( None )
          >>> sm0.getNextState( 'NextState' )
          S_OK( 'NextState' )

        :param str candidateState: name of the next state
        :return: S_OK( nextState ) || S_ERROR
        """
        if candidateState not in self.states:
            return S_ERROR('%s is not a valid state' % candidateState)

        # FIXME: do we need this anymore ?
        if self.state is None:
            return S_OK(candidateState)

        return S_OK(self.states[self.state].transitionRule(candidateState))
#...............................................................................
class RSSMachine(StateMachine):
    """
    RSS flavour of the generic StateMachine.

    Six states are defined; ordered by level ( highest first ) they are:
    Unknown, Active, Degraded, Probing, Banned, Error. Any transition is
    allowed, except from Banned: only Error, Banned and Probing are reachable
    from it, and anything else is redirected to Probing.

    examples:
      >>> rsm0 = RSSMachine( None )
      >>> rsm1 = RSSMachine( 'Unknown' )

    :param state: name of the current state of the StateMachine
    :type state: None or str
    """

    def __init__(self, state):
        """
        Constructor.
        """
        super(RSSMachine, self).__init__(state)

        # Level map: the higher the number, the healthier the state.
        self.states = {'Unknown': State(5),
                       'Active': State(4),
                       'Degraded': State(3),
                       'Probing': State(2),
                       'Banned': State(1, ['Error', 'Banned', 'Probing'], defState='Probing'),
                       'Error': State(0)}

    def orderPolicyResults(self, policyResults):
        """
        Sorts ``policyResults`` in place by the level of their 'Status' entry,
        lowest level leftmost. Beware: a status unknown to the StateMachine has
        level -1 and is therefore ordered first.

        examples:
          >>> rsm0.orderPolicyResults( [ { 'Status' : 'Active', 'A' : 'A' },
                                         { 'Status' : 'Banned', 'B' : 'B' } ] )
          [ { 'Status' : 'Banned', 'B' : 'B' }, { 'Status' : 'Active', 'A' : 'A' } ]

        :param policyResults: list of dictionaries to be ordered; any keys are
            allowed as long as `Status` is present.
        :type policyResults: python:list
        :result: None ( the list is mutable and sorted in place )
        """
        policyResults.sort(key=self.levelOfPolicyState)

    def levelOfPolicyState(self, policyResult):
        """
        Shortcut returning the level of the state in ``policyResult[ 'Status' ]``;
        -1 when the status is not known to the StateMachine. Mostly used as the
        sort key of `orderPolicyResults`.

        examples:
          >>> rsm0.levelOfPolicyState( { 'Status' : 'Active', 'A' : 'A' } )
          5
          >>> rsm0.levelOfPolicyState( { 'Status' : 'Rubbish', 'R' : 'R' } )
          -1

        :param dict policyResult: dictionary that must have the `Status` key.
        :return: int || -1
        """
        return self.levelOfState(policyResult['Status'])
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| gpl-3.0 |
pixelgremlins/ztruck | dj/lib/python2.7/site-packages/django/db/backends/sqlite3/features.py | 119 | 2631 | from __future__ import unicode_literals
import sys
from django.db import utils
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property
from .base import Database
try:
import pytz
except ImportError:
pytz = None
class DatabaseFeatures(BaseDatabaseFeatures):
    # Feature flags describing what the SQLite backend supports; the ORM
    # consults these to decide which SQL constructs it may emit.

    # SQLite cannot handle us only partially reading from a cursor's result set
    # and then writing the same rows to the database in another cursor. This
    # setting ensures we always read result sets fully into memory all in one
    # go.
    can_use_chunked_reads = False
    test_db_allows_multiple_connections = False
    supports_unspecified_pk = True
    supports_timezones = False
    supports_1000_query_parameters = False
    supports_mixed_date_datetime_comparisons = False
    has_bulk_insert = True
    can_combine_inserts_with_and_without_auto_increment_pk = False
    supports_foreign_keys = False
    supports_column_check_constraints = False
    autocommits_when_autocommit_is_off = True
    can_introspect_decimal_field = False
    can_introspect_positive_integer_field = True
    can_introspect_small_integer_field = True
    supports_transactions = True
    atomic_transactions = False
    can_rollback_ddl = True
    supports_paramstyle_pyformat = False
    supports_sequence_reset = False

    @cached_property
    def uses_savepoints(self):
        # Savepoint support requires SQLite 3.6.8 or newer.
        return Database.sqlite_version_info >= (3, 6, 8)

    @cached_property
    def can_release_savepoints(self):
        return self.uses_savepoints

    @cached_property
    def can_share_in_memory_db(self):
        # Sharing an in-memory database between connections needs Python 3.4+
        # (stdlib sqlite3 URI support) and SQLite 3.7.13+.
        return (
            sys.version_info[:2] >= (3, 4) and
            Database.__name__ == 'sqlite3.dbapi2' and
            Database.sqlite_version_info >= (3, 7, 13)
        )

    @cached_property
    def supports_stddev(self):
        """Confirm support for STDDEV and related stats functions

        SQLite supports STDDEV as an extension package; so
        connection.ops.check_expression_support() can't unilaterally
        rule out support for STDDEV. We need to manually check
        whether the call works.
        """
        with self.connection.cursor() as cursor:
            cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
            try:
                cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
                has_support = True
            except utils.DatabaseError:
                has_support = False
            cursor.execute('DROP TABLE STDDEV_TEST')
        return has_support

    @cached_property
    def has_zoneinfo_database(self):
        # Time-zone aware datetime handling relies on pytz being importable.
        return pytz is not None
| apache-2.0 |
gareging/SDN_Framework | ryu/tests/unit/packet/test_vlan.py | 27 | 6748 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
import struct
from struct import *
from nose.tools import *
from ryu.ofproto import ether, inet
from ryu.lib.packet.ethernet import ethernet
from ryu.lib.packet.packet import Packet
from ryu.lib.packet.ipv4 import ipv4
from ryu.lib.packet.vlan import vlan
from ryu.lib.packet.vlan import svlan
LOG = logging.getLogger('test_vlan')
class Test_vlan(unittest.TestCase):
    """ Test case for vlan
    """

    # Shared 802.1Q tag fixture: priority 0, CFI 0, VLAN id 32, carrying IPv4.
    pcp = 0
    cfi = 0
    vid = 32
    tci = pcp << 15 | cfi << 12 | vid
    ethertype = ether.ETH_TYPE_IP
    buf = pack(vlan._PACK_STR, tci, ethertype)
    v = vlan(pcp, cfi, vid, ethertype)

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def find_protocol(self, pkt, name):
        # Return the first protocol in the packet whose name matches, or None.
        for p in pkt.protocols:
            if p.protocol_name == name:
                return p

    def test_init(self):
        # Constructor stores the header fields unchanged.
        eq_(self.pcp, self.v.pcp)
        eq_(self.cfi, self.v.cfi)
        eq_(self.vid, self.v.vid)
        eq_(self.ethertype, self.v.ethertype)

    def test_parser(self):
        # Parsing the packed buffer restores the fields and selects ipv4
        # as the next protocol (the fixture's ethertype is ETH_TYPE_IP).
        res, ptype, _ = self.v.parser(self.buf)

        eq_(res.pcp, self.pcp)
        eq_(res.cfi, self.cfi)
        eq_(res.vid, self.vid)
        eq_(res.ethertype, self.ethertype)
        eq_(ptype, ipv4)

    def test_serialize(self):
        # Serializing yields exactly the packed (tci, ethertype) pair.
        data = bytearray()
        prev = None
        buf = self.v.serialize(data, prev)

        fmt = vlan._PACK_STR
        res = struct.unpack(fmt, buf)

        eq_(res[0], self.tci)
        eq_(res[1], self.ethertype)

    def _build_vlan(self):
        # Assemble a full ethernet/vlan/ipv4 packet around the fixture tag.
        src_mac = '00:07:0d:af:f4:54'
        dst_mac = '00:00:00:00:00:00'
        ethertype = ether.ETH_TYPE_8021Q
        e = ethernet(dst_mac, src_mac, ethertype)

        version = 4
        header_length = 20
        tos = 0
        total_length = 24
        identification = 0x8a5d
        flags = 0
        offset = 1480
        ttl = 64
        proto = inet.IPPROTO_ICMP
        csum = 0xa7f2
        src = '131.151.32.21'
        dst = '131.151.32.129'
        option = b'TEST'
        ip = ipv4(version, header_length, tos, total_length, identification,
                  flags, offset, ttl, proto, csum, src, dst, option)

        p = Packet()
        p.add_protocol(e)
        p.add_protocol(self.v)
        p.add_protocol(ip)
        p.serialize()
        return p

    def test_build_vlan(self):
        # A serialized packet must keep each layer and the vlan fields intact.
        p = self._build_vlan()

        e = self.find_protocol(p, "ethernet")
        ok_(e)
        eq_(e.ethertype, ether.ETH_TYPE_8021Q)

        v = self.find_protocol(p, "vlan")
        ok_(v)
        eq_(v.ethertype, ether.ETH_TYPE_IP)

        ip = self.find_protocol(p, "ipv4")
        ok_(ip)

        eq_(v.pcp, self.pcp)
        eq_(v.cfi, self.cfi)
        eq_(v.vid, self.vid)
        eq_(v.ethertype, self.ethertype)

    @raises(Exception)
    def test_malformed_vlan(self):
        # A buffer shorter than the minimum header length must raise.
        m_short_buf = self.buf[1:vlan._MIN_LEN]
        vlan.parser(m_short_buf)

    def test_json(self):
        # to_jsondict/from_jsondict must round-trip the header.
        jsondict = self.v.to_jsondict()
        v = vlan.from_jsondict(jsondict['vlan'])
        eq_(str(self.v), str(v))
class Test_svlan(unittest.TestCase):
    """Test case for svlan (the outer service tag, ETH_TYPE_8021AD)."""

    # Shared S-VLAN tag fixture: priority 0, CFI 0, VLAN id 32, carrying an
    # inner 802.1Q tag.
    pcp = 0
    cfi = 0
    vid = 32
    tci = pcp << 15 | cfi << 12 | vid
    ethertype = ether.ETH_TYPE_8021Q
    buf = pack(svlan._PACK_STR, tci, ethertype)
    sv = svlan(pcp, cfi, vid, ethertype)

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def find_protocol(self, pkt, name):
        # Return the first protocol in the packet whose name matches, or None.
        for p in pkt.protocols:
            if p.protocol_name == name:
                return p

    def test_init(self):
        # Constructor stores the header fields unchanged.
        eq_(self.pcp, self.sv.pcp)
        eq_(self.cfi, self.sv.cfi)
        eq_(self.vid, self.sv.vid)
        eq_(self.ethertype, self.sv.ethertype)

    def test_parser(self):
        # Parsing restores the fields and selects the inner vlan as the next
        # protocol (the fixture's ethertype is ETH_TYPE_8021Q).
        res, ptype, _ = self.sv.parser(self.buf)

        eq_(res.pcp, self.pcp)
        eq_(res.cfi, self.cfi)
        eq_(res.vid, self.vid)
        eq_(res.ethertype, self.ethertype)
        eq_(ptype, vlan)

    def test_serialize(self):
        # Serializing yields exactly the packed (tci, ethertype) pair.
        data = bytearray()
        prev = None
        buf = self.sv.serialize(data, prev)

        fmt = svlan._PACK_STR
        res = struct.unpack(fmt, buf)

        eq_(res[0], self.tci)
        eq_(res[1], self.ethertype)

    def _build_svlan(self):
        # Assemble a double-tagged ethernet/svlan/vlan/ipv4 packet.
        src_mac = '00:07:0d:af:f4:54'
        dst_mac = '00:00:00:00:00:00'
        ethertype = ether.ETH_TYPE_8021AD
        e = ethernet(dst_mac, src_mac, ethertype)

        pcp = 0
        cfi = 0
        vid = 32
        tci = pcp << 15 | cfi << 12 | vid
        ethertype = ether.ETH_TYPE_IP
        v = vlan(pcp, cfi, vid, ethertype)

        version = 4
        header_length = 20
        tos = 0
        total_length = 24
        identification = 0x8a5d
        flags = 0
        offset = 1480
        ttl = 64
        proto = inet.IPPROTO_ICMP
        csum = 0xa7f2
        src = '131.151.32.21'
        dst = '131.151.32.129'
        option = b'TEST'
        ip = ipv4(version, header_length, tos, total_length, identification,
                  flags, offset, ttl, proto, csum, src, dst, option)

        p = Packet()
        p.add_protocol(e)
        p.add_protocol(self.sv)
        p.add_protocol(v)
        p.add_protocol(ip)
        p.serialize()
        return p

    def test_build_svlan(self):
        # A serialized packet must keep every layer and the svlan fields intact.
        p = self._build_svlan()

        e = self.find_protocol(p, "ethernet")
        ok_(e)
        eq_(e.ethertype, ether.ETH_TYPE_8021AD)

        sv = self.find_protocol(p, "svlan")
        ok_(sv)
        eq_(sv.ethertype, ether.ETH_TYPE_8021Q)

        v = self.find_protocol(p, "vlan")
        ok_(v)
        eq_(v.ethertype, ether.ETH_TYPE_IP)

        ip = self.find_protocol(p, "ipv4")
        ok_(ip)

        eq_(sv.pcp, self.pcp)
        eq_(sv.cfi, self.cfi)
        eq_(sv.vid, self.vid)
        eq_(sv.ethertype, self.ethertype)

    @raises(Exception)
    def test_malformed_svlan(self):
        # A buffer shorter than the minimum header length must raise.
        m_short_buf = self.buf[1:svlan._MIN_LEN]
        svlan.parser(m_short_buf)

    def test_json(self):
        # to_jsondict/from_jsondict must round-trip the header.
        jsondict = self.sv.to_jsondict()
        sv = svlan.from_jsondict(jsondict['svlan'])
        eq_(str(self.sv), str(sv))
| apache-2.0 |
webmull/phantomjs | src/breakpad/src/third_party/protobuf/protobuf/python/mox.py | 603 | 38237 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
"""Mox, an object-mocking framework for Python.
Mox works in the record-replay-verify paradigm. When you first create
a mock object, it is in record mode. You then programmatically set
the expected behavior of the mock object (what methods are to be
called on it, with what parameters, what they should return, and in
what order).
Once you have set up the expected mock behavior, you put it in replay
mode. Now the mock responds to method calls just as you told it to.
If an unexpected method (or an expected method with unexpected
parameters) is called, then an exception will be raised.
Once you are done interacting with the mock, you need to verify that
all the expected interactions occured. (Maybe your code exited
prematurely without calling some cleanup method!) The verify phase
ensures that every expected method was called; otherwise, an exception
will be raised.
Suggested usage / workflow:
# Create Mox factory
my_mox = Mox()
# Create a mock data access object
mock_dao = my_mox.CreateMock(DAOClass)
# Set up expected behavior
mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
mock_dao.DeletePerson(person)
# Put mocks in replay mode
my_mox.ReplayAll()
# Inject mock object and run test
controller.SetDao(mock_dao)
controller.DeletePersonById('1')
# Verify all methods were called as expected
my_mox.VerifyAll()
"""
from collections import deque
import re
import types
import unittest
import stubout
class Error(AssertionError):
    """Base exception for this module.

    Derives from AssertionError so test frameworks treat mox failures as
    ordinary assertion failures.
    """
class ExpectedMethodCallsError(Error):
"""Raised when Verify() is called before all expected methods have been called
"""
def __init__(self, expected_methods):
"""Init exception.
Args:
# expected_methods: A sequence of MockMethod objects that should have been
# called.
expected_methods: [MockMethod]
Raises:
ValueError: if expected_methods contains no methods.
"""
if not expected_methods:
raise ValueError("There must be at least one expected method")
Error.__init__(self)
self._expected_methods = expected_methods
def __str__(self):
calls = "\n".join(["%3d. %s" % (i, m)
for i, m in enumerate(self._expected_methods)])
return "Verify: Expected methods never called:\n%s" % (calls,)
class UnexpectedMethodCallError(Error):
"""Raised when an unexpected method is called.
This can occur if a method is called with incorrect parameters, or out of the
specified order.
"""
def __init__(self, unexpected_method, expected):
"""Init exception.
Args:
# unexpected_method: MockMethod that was called but was not at the head of
# the expected_method queue.
# expected: MockMethod or UnorderedGroup the method should have
# been in.
unexpected_method: MockMethod
expected: MockMethod or UnorderedGroup
"""
Error.__init__(self)
self._unexpected_method = unexpected_method
self._expected = expected
def __str__(self):
return "Unexpected method call: %s. Expecting: %s" % \
(self._unexpected_method, self._expected)
class UnknownMethodCallError(Error):
"""Raised if an unknown method is requested of the mock object."""
def __init__(self, unknown_method_name):
"""Init exception.
Args:
# unknown_method_name: Method call that is not part of the mocked class's
# public interface.
unknown_method_name: str
"""
Error.__init__(self)
self._unknown_method_name = unknown_method_name
def __str__(self):
return "Method called is not a member of the object: %s" % \
self._unknown_method_name
class Mox(object):
    """Mox: a factory for creating mock objects."""

    # A list of types that should be stubbed out with MockObjects (as
    # opposed to MockAnythings).
    _USE_MOCK_OBJECT = [types.ClassType, types.InstanceType, types.ModuleType,
                        types.ObjectType, types.TypeType]

    def __init__(self):
        """Initialize a new Mox."""
        # Every mock created through this factory, so ReplayAll/VerifyAll/
        # ResetAll can fan out over all of them.
        self._mock_objects = []
        self.stubs = stubout.StubOutForTesting()

    def CreateMock(self, class_to_mock):
        """Create a new mock object.

        Args:
          # class_to_mock: the class to be mocked
          class_to_mock: class

        Returns:
          MockObject that can be used as the class_to_mock would be.
        """
        new_mock = MockObject(class_to_mock)
        self._mock_objects.append(new_mock)
        return new_mock

    def CreateMockAnything(self):
        """Create a mock that will accept any method calls.

        This does not enforce an interface.
        """
        new_mock = MockAnything()
        self._mock_objects.append(new_mock)
        return new_mock

    def ReplayAll(self):
        """Set all mock objects to replay mode."""

        for mock_obj in self._mock_objects:
            mock_obj._Replay()

    def VerifyAll(self):
        """Call verify on all mock objects created."""

        for mock_obj in self._mock_objects:
            mock_obj._Verify()

    def ResetAll(self):
        """Call reset on all mock objects.  This does not unset stubs."""

        for mock_obj in self._mock_objects:
            mock_obj._Reset()

    def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
        """Replace a method, attribute, etc. with a Mock.

        This will replace a class or module with a MockObject, and everything
        else (method, function, etc) with a MockAnything.  This can be
        overridden to always use a MockAnything by setting use_mock_anything
        to True.

        Args:
          obj: A Python object (class, module, instance, callable).
          attr_name: str.  The name of the attribute to replace with a mock.
          use_mock_anything: bool. True if a MockAnything should be used
            regardless of the type of attribute.
        """
        attr_to_replace = getattr(obj, attr_name)
        if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:
            stub = self.CreateMock(attr_to_replace)
        else:
            stub = self.CreateMockAnything()

        self.stubs.Set(obj, attr_name, stub)

    def UnsetStubs(self):
        """Restore stubs to their original state."""

        self.stubs.UnsetAll()
def Replay(*args):
    """Switch each mock given as an argument into replay mode.

    Args:
      # args is any number of mocks to put into replay mode.
    """
    for mock_object in args:
        mock_object._Replay()
def Verify(*args):
    """Verify each mock given as an argument.

    Args:
      # args is any number of mocks to be verified.
    """
    for mock_object in args:
        mock_object._Verify()
def Reset(*args):
    """Reset each mock given as an argument.

    Args:
      # args is any number of mocks to be reset.
    """
    for mock_object in args:
        mock_object._Reset()
class MockAnything:
    """A mock that can be used to mock anything.

    This is helpful for mocking classes that do not provide a public interface.
    """

    def __init__(self):
        """Create a new MockAnything in record mode with an empty call queue."""

        self._Reset()

    def __getattr__(self, method_name):
        """Intercept method calls on this object.

        A new MockMethod is returned that is aware of the MockAnything's
        state (record or replay).  The call will be recorded or replayed
        by the MockMethod's __call__.

        Args:
          # method name: the name of the method being called.
          method_name: str

        Returns:
          A new MockMethod aware of MockAnything's state (record or replay).
        """

        return self._CreateMockMethod(method_name)

    def _CreateMockMethod(self, method_name):
        """Create a new mock method call and return it.

        Args:
          # method name: the name of the method being called.
          method_name: str

        Returns:
          A new MockMethod aware of MockAnything's state (record or replay).
        """

        return MockMethod(method_name, self._expected_calls_queue,
                          self._replay_mode)

    def __nonzero__(self):
        """Return 1 for nonzero so the mock can be used as a conditional."""
        # Without this, truth-testing would fall through to __getattr__ and
        # record a spurious call. (Python 2 truth protocol.)

        return 1

    def __eq__(self, rhs):
        """Provide custom logic to compare objects."""

        return (isinstance(rhs, MockAnything) and
                self._replay_mode == rhs._replay_mode and
                self._expected_calls_queue == rhs._expected_calls_queue)

    def __ne__(self, rhs):
        """Provide custom logic to compare objects."""

        return not self == rhs

    def _Replay(self):
        """Start replaying expected method calls."""

        self._replay_mode = True

    def _Verify(self):
        """Verify that all of the expected calls have been made.

        Raises:
          ExpectedMethodCallsError: if there are still more method calls in the
            expected queue.
        """

        # If the list of expected calls is not empty, raise an exception
        if self._expected_calls_queue:
            # The last MultipleTimesGroup is not popped from the queue.
            if (len(self._expected_calls_queue) == 1 and
                isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
                self._expected_calls_queue[0].IsSatisfied()):
                pass
            else:
                raise ExpectedMethodCallsError(self._expected_calls_queue)

    def _Reset(self):
        """Reset the state of this mock to record mode with an empty queue."""

        # Maintain a list of method calls we are expecting
        self._expected_calls_queue = deque()

        # Make sure we are in setup mode, not replay mode
        self._replay_mode = False
class MockObject(MockAnything, object):
    """A mock object that simulates the public/protected interface of a class."""

    def __init__(self, class_to_mock):
        """Initialize a mock object.

        This determines the methods and properties of the class and stores them.

        Args:
          # class_to_mock: class to be mocked
          class_to_mock: class
        """
        # This is used to hack around the mixin/inheritance of MockAnything, which
        # is not a proper object (it can be anything. :-)
        MockAnything.__dict__['__init__'](self)

        # Get a list of all the public and special methods we should mock.
        self._known_methods = set()
        self._known_vars = set()
        self._class_to_mock = class_to_mock
        # Partition the class namespace into callables (mockable methods) and
        # plain attributes (passed through on access).
        for method in dir(class_to_mock):
            if callable(getattr(class_to_mock, method)):
                self._known_methods.add(method)
            else:
                self._known_vars.add(method)

    def __getattr__(self, name):
        """Intercept attribute request on this object.

        If the attribute is a public class variable, it will be returned and not
        recorded as a call.

        If the attribute is not a variable, it is handled like a method
        call. The method name is checked against the set of mockable
        methods, and a new MockMethod is returned that is aware of the
        MockObject's state (record or replay). The call will be recorded
        or replayed by the MockMethod's __call__.

        Args:
          # name: the name of the attribute being requested.
          name: str

        Returns:
          Either a class variable or a new MockMethod that is aware of the state
          of the mock (record or replay).

        Raises:
          UnknownMethodCallError if the MockObject does not mock the requested
              method.
        """
        if name in self._known_vars:
            return getattr(self._class_to_mock, name)

        if name in self._known_methods:
            return self._CreateMockMethod(name)

        # Not part of the mocked class's interface at all.
        raise UnknownMethodCallError(name)

    def __eq__(self, rhs):
        """Provide custom logic to compare objects."""
        return (isinstance(rhs, MockObject) and
                self._class_to_mock == rhs._class_to_mock and
                self._replay_mode == rhs._replay_mode and
                self._expected_calls_queue == rhs._expected_calls_queue)

    def __setitem__(self, key, value):
        """Provide custom logic for mocking classes that support item assignment.

        Args:
          key: Key to set the value for.
          value: Value to set.

        Returns:
          Expected return value in replay mode. A MockMethod object for the
          __setitem__ method that has already been called if not in replay mode.

        Raises:
          TypeError if the underlying class does not support item assignment.
          UnexpectedMethodCallError if the object does not expect the call to
            __setitem__.
        """
        setitem = self._class_to_mock.__dict__.get('__setitem__', None)

        # Verify the class supports item assignment.
        if setitem is None:
            raise TypeError('object does not support item assignment')

        # If we are in replay mode then simply call the mock __setitem__ method.
        if self._replay_mode:
            return MockMethod('__setitem__', self._expected_calls_queue,
                              self._replay_mode)(key, value)

        # Otherwise, create a mock method __setitem__.
        return self._CreateMockMethod('__setitem__')(key, value)

    def __getitem__(self, key):
        """Provide custom logic for mocking classes that are subscriptable.

        Args:
          key: Key to return the value for.

        Returns:
          Expected return value in replay mode. A MockMethod object for the
          __getitem__ method that has already been called if not in replay mode.

        Raises:
          TypeError if the underlying class is not subscriptable.
          UnexpectedMethodCallError if the object does not expect the call to
            __getitem__.
        """
        getitem = self._class_to_mock.__dict__.get('__getitem__', None)

        # Verify the class supports item assignment.
        if getitem is None:
            raise TypeError('unsubscriptable object')

        # If we are in replay mode then simply call the mock __getitem__ method.
        if self._replay_mode:
            return MockMethod('__getitem__', self._expected_calls_queue,
                              self._replay_mode)(key)

        # Otherwise, create a mock method __getitem__.
        return self._CreateMockMethod('__getitem__')(key)

    def __call__(self, *params, **named_params):
        """Provide custom logic for mocking classes that are callable."""
        # Verify the class we are mocking is callable
        # NOTE(review): this local deliberately shadows the builtin `callable`;
        # kept as-is for fidelity with the original.
        callable = self._class_to_mock.__dict__.get('__call__', None)
        if callable is None:
            raise TypeError('Not callable')

        # Because the call is happening directly on this object instead of a method,
        # the call on the mock method is made right here
        mock_method = self._CreateMockMethod('__call__')
        return mock_method(*params, **named_params)

    @property
    def __class__(self):
        """Return the class that is being mocked."""
        # Spoofing __class__ makes isinstance(mock, MockedClass) succeed.
        return self._class_to_mock
class MockMethod(object):
    """Callable mock method.

    A MockMethod should act exactly like the method it mocks, accepting parameters
    and returning a value, or throwing an exception (as specified). When this
    method is called, it can optionally verify whether the called method (name and
    signature) matches the expected method.
    """

    def __init__(self, method_name, call_queue, replay_mode):
        """Construct a new mock method.

        Args:
          # method_name: the name of the method
          # call_queue: deque of calls, verify this call against the head, or add
          #     this call to the queue.
          # replay_mode: False if we are recording, True if we are verifying calls
          #     against the call queue.
          method_name: str
          call_queue: list or deque
          replay_mode: bool
        """
        self._name = method_name
        self._call_queue = call_queue
        # Normalize to a deque so popleft/appendleft are available.
        if not isinstance(call_queue, deque):
            self._call_queue = deque(self._call_queue)
        self._replay_mode = replay_mode

        # Populated when the method is actually called.
        self._params = None
        self._named_params = None
        # Configured via AndReturn / AndRaise / WithSideEffects.
        self._return_value = None
        self._exception = None
        self._side_effects = None

    def __call__(self, *params, **named_params):
        """Log parameters and return the specified return value.

        If the Mock(Anything/Object) associated with this call is in record mode,
        this MockMethod will be pushed onto the expected call queue. If the mock
        is in replay mode, this will pop a MockMethod off the top of the queue and
        verify this call is equal to the expected call.

        Raises:
          UnexpectedMethodCall if this call is supposed to match an expected method
            call and it does not.
        """
        self._params = params
        self._named_params = named_params

        if not self._replay_mode:
            # Record mode: queue this call and return self so the caller can
            # chain AndReturn/AndRaise/etc.
            self._call_queue.append(self)
            return self

        # Replay mode: match against the recorded expectation and honor its
        # configured side effects / exception / return value, in that order.
        expected_method = self._VerifyMethodCall()

        if expected_method._side_effects:
            expected_method._side_effects(*params, **named_params)

        if expected_method._exception:
            raise expected_method._exception

        return expected_method._return_value

    def __getattr__(self, name):
        """Raise an AttributeError with a helpful message."""
        raise AttributeError('MockMethod has no attribute "%s". '
                             'Did you remember to put your mocks in replay mode?' % name)

    def _PopNextMethod(self):
        """Pop the next method from our call queue."""
        try:
            return self._call_queue.popleft()
        except IndexError:
            # Empty queue: this call was never expected.
            raise UnexpectedMethodCallError(self, None)

    def _VerifyMethodCall(self):
        """Verify the called method is expected.

        This can be an ordered method, or part of an unordered set.

        Returns:
          The expected mock method.

        Raises:
          UnexpectedMethodCall if the method called was not expected.
        """
        expected = self._PopNextMethod()

        # Loop here, because we might have a MethodGroup followed by another
        # group.
        while isinstance(expected, MethodGroup):
            expected, method = expected.MethodCalled(self)
            if method is not None:
                return method

        # This is a mock method, so just check equality.
        if expected != self:
            raise UnexpectedMethodCallError(self, expected)

        return expected

    def __str__(self):
        # Render as "name(params) -> return_value" for error messages.
        params = ', '.join(
            [repr(p) for p in self._params or []] +
            ['%s=%r' % x for x in sorted((self._named_params or {}).items())])
        desc = "%s(%s) -> %r" % (self._name, params, self._return_value)
        return desc

    def __eq__(self, rhs):
        """Test whether this MockMethod is equivalent to another MockMethod.

        Args:
          # rhs: the right hand side of the test
          rhs: MockMethod
        """
        # Note: Comparator parameters participate here via their own __eq__.
        return (isinstance(rhs, MockMethod) and
                self._name == rhs._name and
                self._params == rhs._params and
                self._named_params == rhs._named_params)

    def __ne__(self, rhs):
        """Test whether this MockMethod is not equivalent to another MockMethod.

        Args:
          # rhs: the right hand side of the test
          rhs: MockMethod
        """
        return not self == rhs

    def GetPossibleGroup(self):
        """Returns a possible group from the end of the call queue or None if no
        other methods are on the stack.
        """
        # Remove this method from the tail of the queue so we can add it to a group.
        this_method = self._call_queue.pop()
        assert this_method == self

        # Determine if the tail of the queue is a group, or just a regular ordered
        # mock method.
        group = None
        try:
            group = self._call_queue[-1]
        except IndexError:
            pass

        return group

    def _CheckAndCreateNewGroup(self, group_name, group_class):
        """Checks if the last method (a possible group) is an instance of our
        group_class. Adds the current method to this group or creates a new one.

        Args:
          group_name: the name of the group.
          group_class: the class used to create instance of this new group
        """
        group = self.GetPossibleGroup()

        # If this is a group, and it is the correct group, add the method.
        if isinstance(group, group_class) and group.group_name() == group_name:
            group.AddMethod(self)
            return self

        # Create a new group and add the method.
        new_group = group_class(group_name)
        new_group.AddMethod(self)
        self._call_queue.append(new_group)
        return self

    def InAnyOrder(self, group_name="default"):
        """Move this method into a group of unordered calls.

        A group of unordered calls must be defined together, and must be executed
        in full before the next expected method can be called. There can be
        multiple groups that are expected serially, if they are given
        different group names. The same group name can be reused if there is a
        standard method call, or a group with a different name, spliced between
        usages.

        Args:
          group_name: the name of the unordered group.

        Returns:
          self
        """
        return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)

    def MultipleTimes(self, group_name="default"):
        """Move this method into group of calls which may be called multiple times.

        A group of repeating calls must be defined together, and must be executed in
        full before the next expected mehtod can be called.

        Args:
          group_name: the name of the unordered group.

        Returns:
          self
        """
        return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)

    def AndReturn(self, return_value):
        """Set the value to return when this method is called.

        Args:
          # return_value can be anything.
        """
        self._return_value = return_value
        return return_value

    def AndRaise(self, exception):
        """Set the exception to raise when this method is called.

        Args:
          # exception: the exception to raise when this method is called.
          exception: Exception
        """
        self._exception = exception

    def WithSideEffects(self, side_effects):
        """Set the side effects that are simulated when this method is called.

        Args:
          side_effects: A callable which modifies the parameters or other relevant
            state which a given test case depends on.

        Returns:
          Self for chaining with AndReturn and AndRaise.
        """
        self._side_effects = side_effects
        return self
class Comparator:
    """Base class for all Mox comparators.

    A Comparator can be used as a parameter to a mocked method when the exact
    value is not known. For example, the code you are testing might build up a
    long SQL string that is passed to your mock DAO. You're only interested that
    the IN clause contains the proper primary keys, so you can set your mock
    up as follows:

      mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)

    Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.

    A Comparator may replace one or more parameters, for example:
      # return at most 10 rows
      mock_dao.RunQuery(StrContains('SELECT'), 10)

    or

      # Return some non-deterministic number of rows
      mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
    """

    def equals(self, rhs):
        """Special equals method that all comparators must implement.

        Args:
          rhs: any python object

        Raises:
          NotImplementedError: always; subclasses must override this method.
        """
        # Fix: use the call form of raise (valid in both Python 2 and 3)
        # instead of the Python 2-only "raise Class, message" statement syntax.
        raise NotImplementedError('method must be implemented by a subclass.')

    def __eq__(self, rhs):
        # Equality delegates to the subclass's matching logic.
        return self.equals(rhs)

    def __ne__(self, rhs):
        return not self.equals(rhs)
class IsA(Comparator):
    """Comparator that matches any value of a given type or class.

    Example:
      mock_dao.Connect(IsA(DbConnectInfo))
    """

    def __init__(self, class_name):
        """Remember the type or class to test parameters against.

        Args:
          class_name: basic python type or a class
        """
        self._class_name = class_name

    def equals(self, rhs):
        """Check to see if the RHS is an instance of class_name.

        Args:
          # rhs: the right hand side of the test
          rhs: object

        Returns:
          bool
        """
        try:
            result = isinstance(rhs, self._class_name)
        except TypeError:
            # isinstance() rejects some pseudo-types (e.g. cStringIO.StringIO),
            # so fall back to comparing the raw types directly.
            result = type(rhs) == type(self._class_name)
        return result

    def __repr__(self):
        return str(self._class_name)
class IsAlmost(Comparator):
    """Comparator matching values nearly equal to a target value.

    Generally useful for floating point numbers.

    Example:
      mock_dao.SetTimeout((IsAlmost(3.9)))
    """

    def __init__(self, float_value, places=7):
        """Remember the target value and rounding precision.

        Args:
          float_value: The value for making the comparison.
          places: The number of decimal places to round to.
        """
        self._float_value = float_value
        self._places = places

    def equals(self, rhs):
        """Check to see if RHS is almost equal to float_value.

        Args:
          rhs: the value to compare to float_value

        Returns:
          bool
        """
        try:
            difference = rhs - self._float_value
            return round(difference, self._places) == 0
        except TypeError:
            # This is probably because either float_value or rhs is not a number.
            return False

    def __repr__(self):
        return str(self._float_value)
class StrContains(Comparator):
    """Comparator matching any string that contains a given substring.

    This can be useful in mocking a database with SQL passed in as a string
    parameter, for example.

    Example:
      mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
    """

    def __init__(self, search_string):
        """Remember the substring to search for.

        Args:
          # search_string: the string you are searching for
          search_string: str
        """
        self._search_string = search_string

    def equals(self, rhs):
        """Check to see if the search_string is contained in the rhs string.

        Args:
          # rhs: the right hand side of the test
          rhs: object

        Returns:
          bool
        """
        # Deliberately uses .find() rather than the `in` operator so that any
        # object without a find() method simply fails to match.
        try:
            return rhs.find(self._search_string) != -1
        except Exception:
            return False

    def __repr__(self):
        return "<str containing '%s'>" % self._search_string
class Regex(Comparator):
    """Comparator matching strings against a regular expression."""

    def __init__(self, pattern, flags=0):
        """Compile and store the pattern to match parameters against.

        Args:
          # pattern is the regular expression to search for
          pattern: str
          # flags passed to re.compile function as the second argument
          flags: int
        """
        self.regex = re.compile(pattern, flags=flags)

    def equals(self, rhs):
        """Check to see if rhs matches regular expression pattern.

        Returns:
          bool
        """
        return self.regex.search(rhs) is not None

    def __repr__(self):
        pieces = ["<regular expression '%s'" % self.regex.pattern]
        if self.regex.flags:
            pieces.append(', flags=%d' % self.regex.flags)
        pieces.append('>')
        return ''.join(pieces)
class In(Comparator):
    """Comparator checking membership of an item (or key) in a container.

    Example:
      mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
    """

    def __init__(self, key):
        """Remember the item (or key) to look for.

        Args:
          # key is any thing that could be in a list or a key in a dict
        """
        self._key = key

    def equals(self, rhs):
        """Check to see whether key is in rhs.

        Args:
          rhs: dict

        Returns:
          bool
        """
        return self._key in rhs

    def __repr__(self):
        return "<sequence or map containing '%s'>" % self._key
class ContainsKeyValue(Comparator):
    """Comparator checking that a dict parameter contains a key/value pair.

    Example:
      mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
    """

    def __init__(self, key, value):
        """Remember the expected key and its associated value.

        Args:
          # key: a key in a dict
          # value: the corresponding value
        """
        self._key = key
        self._value = value

    def equals(self, rhs):
        """Check whether the given key/value pair is in the rhs dict.

        Returns:
          bool
        """
        # Any failure (missing key, unsubscriptable rhs, ...) means no match.
        try:
            match = rhs[self._key] == self._value
        except Exception:
            match = False
        return match

    def __repr__(self):
        return "<map containing the entry '%s: %s'>" % (self._key, self._value)
class SameElementsAs(Comparator):
    """Checks whether iterables contain the same elements (ignoring order).

    Example:
      mock_dao.ProcessUsers(SameElementsAs('stevepm', 'salomaki'))
    """

    def __init__(self, expected_seq):
        """Remember the expected sequence of elements.

        Args:
          expected_seq: a sequence
        """
        self._expected_seq = expected_seq

    def equals(self, actual_seq):
        """Check to see whether actual_seq has same elements as expected_seq.

        Args:
          actual_seq: sequence

        Returns:
          bool
        """
        # NOTE(review): the dict-based fast path collapses duplicate hashable
        # elements, so this effectively compares as sets in that case; the list
        # fallback does compare duplicates. Presumably intentional — confirm
        # before changing.
        try:
            expected = dict([(element, None) for element in self._expected_seq])
            actual = dict([(element, None) for element in actual_seq])
        except TypeError:
            # Fall back to slower list-compare if any of the objects are unhashable.
            expected = list(self._expected_seq)
            actual = list(actual_seq)
            expected.sort()
            actual.sort()
        return expected == actual

    def __repr__(self):
        return '<sequence with same elements as \'%s\'>' % self._expected_seq
class And(Comparator):
    """Evaluates one or more Comparators on RHS and returns an AND of the results.
    """

    def __init__(self, *args):
        """Remember the comparators to combine.

        Args:
          *args: One or more Comparator
        """
        self._comparators = args

    def equals(self, rhs):
        """Checks whether all Comparators are equal to rhs.

        Args:
          # rhs: can be anything

        Returns:
          bool
        """
        # all() short-circuits on the first failing comparator, exactly like
        # the equivalent explicit loop.
        return all(comparator.equals(rhs) for comparator in self._comparators)

    def __repr__(self):
        return '<AND %s>' % str(self._comparators)
class Or(Comparator):
    """Evaluates one or more Comparators on RHS and returns an OR of the results.
    """

    def __init__(self, *args):
        """Remember the comparators to combine.

        Args:
          *args: One or more Mox comparators
        """
        self._comparators = args

    def equals(self, rhs):
        """Checks whether any Comparator is equal to rhs.

        Args:
          # rhs: can be anything

        Returns:
          bool
        """
        # any() short-circuits on the first matching comparator, exactly like
        # the equivalent explicit loop.
        return any(comparator.equals(rhs) for comparator in self._comparators)

    def __repr__(self):
        return '<OR %s>' % str(self._comparators)
class Func(Comparator):
    """Delegate parameter validation to an arbitrary callable.

    You may need the ability to perform more advanced operations on the parameter
    in order to validate it. You can use this to have a callable validate any
    parameter. The callable should return either True or False.

    Example:
      def myParamValidator(param):
        # Advanced logic here
        return True

      mock_dao.DoSomething(Func(myParamValidator), true)
    """

    def __init__(self, func):
        """Remember the validation callable.

        Args:
          func: callable that takes one parameter and returns a bool
        """
        self._func = func

    def equals(self, rhs):
        """Test whether rhs passes the function test.

        rhs is passed into func.

        Args:
          rhs: any python object

        Returns:
          the result of func(rhs)
        """
        verdict = self._func(rhs)
        return verdict

    def __repr__(self):
        return str(self._func)
class IgnoreArg(Comparator):
    """Comparator that matches any argument whatsoever.

    This can be used when we don't care about an argument of a method call.

    Example:
      # Check if CastMagic is called with 3 as first arg and 'disappear' as third.
      mymock.CastMagic(3, IgnoreArg(), 'disappear')
    """

    def equals(self, unused_rhs):
        """Accept any value.

        Args:
          unused_rhs: any python object

        Returns:
          always returns True
        """
        return True

    def __repr__(self):
        return '<IgnoreArg>'
class MethodGroup(object):
    """Abstract base holding the behaviour shared by all MethodGroups.

    Subclasses implement AddMethod/MethodCalled/IsSatisfied to define how a
    set of expected calls is matched.
    """

    def __init__(self, group_name):
        self._group_name = group_name

    def group_name(self):
        """Return the name this group was created with."""
        return self._group_name

    def __str__(self):
        cls_name = self.__class__.__name__
        return '<%s "%s">' % (cls_name, self._group_name)

    def AddMethod(self, mock_method):
        """Record an expected method in this group. Subclass responsibility."""
        raise NotImplementedError

    def MethodCalled(self, mock_method):
        """Match a replayed call against this group. Subclass responsibility."""
        raise NotImplementedError

    def IsSatisfied(self):
        """Report whether this group's expectations are met. Subclass responsibility."""
        raise NotImplementedError
class UnorderedGroup(MethodGroup):
    """UnorderedGroup holds a set of method calls that may occur in any order.

    This construct is helpful for non-deterministic events, such as iterating
    over the keys of a dict.
    """

    def __init__(self, group_name):
        super(UnorderedGroup, self).__init__(group_name)
        # Expected calls, order-insensitive.
        self._methods = []

    def AddMethod(self, mock_method):
        """Add a method to this group.

        Args:
          mock_method: A mock method to be added to this group.
        """
        self._methods.append(mock_method)

    def MethodCalled(self, mock_method):
        """Remove a method call from the group.

        If the method is not in the set, an UnexpectedMethodCallError will be
        raised.

        Args:
          mock_method: a mock method that should be equal to a method in the group.

        Returns:
          The mock method from the group

        Raises:
          UnexpectedMethodCallError if the mock_method was not in the group.
        """
        # Check to see if this method exists, and if so, remove it from the set
        # and return it.
        for method in self._methods:
            if method == mock_method:
                # Remove the called mock_method instead of the method in the group.
                # The called method will match any comparators when equality is checked
                # during removal. The method in the group could pass a comparator to
                # another comparator during the equality check.
                self._methods.remove(mock_method)

                # If this group is not empty, put it back at the head of the queue.
                if not self.IsSatisfied():
                    mock_method._call_queue.appendleft(self)

                return self, method

        raise UnexpectedMethodCallError(mock_method, self)

    def IsSatisfied(self):
        """Return True if there are not any methods in this group."""
        return len(self._methods) == 0
class MultipleTimesGroup(MethodGroup):
    """MultipleTimesGroup holds methods that may be called any number of times.

    Note: Each method must be called at least once.

    This is helpful, if you don't know or care how many times a method is called.
    """

    def __init__(self, group_name):
        super(MultipleTimesGroup, self).__init__(group_name)
        # Expected methods, and the subset of them seen so far during replay.
        self._methods = set()
        self._methods_called = set()

    def AddMethod(self, mock_method):
        """Add a method to this group.

        Args:
          mock_method: A mock method to be added to this group.
        """
        self._methods.add(mock_method)

    def MethodCalled(self, mock_method):
        """Remove a method call from the group.

        If the method is not in the set, an UnexpectedMethodCallError will be
        raised.

        Args:
          mock_method: a mock method that should be equal to a method in the group.

        Returns:
          The mock method from the group

        Raises:
          UnexpectedMethodCallError if the mock_method was not in the group.
        """
        # Check to see if this method exists, and if so add it to the set of
        # called methods.
        for method in self._methods:
            if method == mock_method:
                self._methods_called.add(mock_method)
                # Always put this group back on top of the queue, because we don't know
                # when we are done.
                mock_method._call_queue.appendleft(self)
                return self, method

        # The call didn't match this group; if the group is already satisfied,
        # fall through to the next expectation in the queue instead of failing.
        if self.IsSatisfied():
            next_method = mock_method._PopNextMethod();
            return next_method, None
        else:
            raise UnexpectedMethodCallError(mock_method, self)

    def IsSatisfied(self):
        """Return True if all methods in this group are called at least once."""
        # NOTE(psycho): We can't use the simple set difference here because we want
        # to match different parameters which are considered the same e.g. IsA(str)
        # and some string. This solution is O(n^2) but n should be small.
        tmp = self._methods.copy()
        for called in self._methods_called:
            for expected in tmp:
                if called == expected:
                    tmp.remove(expected)
                    if not tmp:
                        return True
                    break
        return False
class MoxMetaTestBase(type):
    """Metaclass to add mox cleanup and verification to every test.

    As the mox unit testing class is being constructed (MoxTestBase or a
    subclass), this metaclass will modify all test functions to call the
    CleanUpMox method of the test class after they finish. This means that
    unstubbing and verifying will happen for every test with no additional code,
    and any failures will result in test failures as opposed to errors.
    """

    def __init__(cls, name, bases, d):
        type.__init__(cls, name, bases, d)

        # also get all the attributes from the base classes to account
        # for a case when test class is not the immediate child of MoxTestBase
        # NOTE(review): this mutates the class dict `d` in place, so attributes
        # inherited from later bases can overwrite same-named entries from the
        # class body or earlier bases — presumably acceptable for test methods.
        for base in bases:
            for attr_name in dir(base):
                d[attr_name] = getattr(base, attr_name)

        # Wrap every test* callable so mox cleanup always runs after it.
        for func_name, func in d.items():
            if func_name.startswith('test') and callable(func):
                setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))

    @staticmethod
    def CleanUpTest(cls, func):
        """Adds Mox cleanup code to any MoxTestBase method.

        Always unsets stubs after a test. Will verify all mocks for tests that
        otherwise pass.

        Args:
          cls: MoxTestBase or subclass; the class whose test method we are altering.
          func: method; the method of the MoxTestBase test class we wish to alter.

        Returns:
          The modified method.
        """
        def new_method(self, *args, **kwargs):
            mox_obj = getattr(self, 'mox', None)
            cleanup_mox = False
            if mox_obj and isinstance(mox_obj, Mox):
                cleanup_mox = True
            try:
                func(self, *args, **kwargs)
            finally:
                # Stubs are always restored, even when the test failed...
                if cleanup_mox:
                    mox_obj.UnsetStubs()
            # ...but verification only happens when the test body did not raise,
            # so a genuine test failure is not masked by a verification error.
            if cleanup_mox:
                mox_obj.VerifyAll()
        # Preserve the wrapped function's identity for test discovery/reporting.
        new_method.__name__ = func.__name__
        new_method.__doc__ = func.__doc__
        new_method.__module__ = func.__module__
        return new_method
class MoxTestBase(unittest.TestCase):
    """Convenience test class to make stubbing easier.

    Sets up a "mox" attribute which is an instance of Mox - any mox tests will
    want this. Also automatically unsets any stubs and verifies that all mock
    methods have been called at the end of each test, eliminating boilerplate
    code.
    """

    # Python 2 metaclass declaration; the metaclass wraps test* methods with
    # automatic UnsetStubs/VerifyAll (see MoxMetaTestBase).
    __metaclass__ = MoxMetaTestBase

    def setUp(self):
        # A fresh Mox per test; the metaclass-wrapped tests clean it up.
        self.mox = Mox()
| bsd-3-clause |
abridgett/boto | boto/ec2/image.py | 92 | 16222 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.ec2.ec2object import EC2Object, TaggedEC2Object
from boto.ec2.blockdevicemapping import BlockDeviceMapping
class ProductCodes(list):
    """SAX-style collector for <productCode> values in an EC2 response.

    Behaves as a plain list; the boto response parser feeds it element
    events and matching values are accumulated.
    """

    def startElement(self, name, attrs, connection):
        # Product codes carry no nested structure worth descending into.
        pass

    def endElement(self, name, value, connection):
        # Only the text of <productCode> elements is collected.
        if name != 'productCode':
            return
        self.append(value)
class BillingProducts(list):
    """SAX-style collector for <billingProduct> values in an EC2 response.

    Behaves as a plain list; the boto response parser feeds it element
    events and matching values are accumulated.
    """

    def startElement(self, name, attrs, connection):
        # Billing products carry no nested structure worth descending into.
        pass

    def endElement(self, name, value, connection):
        # Only the text of <billingProduct> elements is collected.
        if name != 'billingProduct':
            return
        self.append(value)
class Image(TaggedEC2Object):
"""
Represents an EC2 Image
"""
    def __init__(self, connection=None):
        """Initialize an empty Image; fields are filled in by XML parsing.

        :param connection: optional EC2 connection used for service calls
            such as :meth:`update`.
        """
        super(Image, self).__init__(connection)
        # Identity / location of the AMI.
        self.id = None
        self.location = None
        self.state = None
        # Ownership.
        self.ownerId = None  # for backwards compatibility
        self.owner_id = None
        self.owner_alias = None
        self.is_public = False
        # Platform / architecture metadata.
        self.architecture = None
        self.platform = None
        self.type = None
        self.kernel_id = None
        self.ramdisk_id = None
        # Descriptive metadata.
        self.name = None
        self.description = None
        # List-valued fields populated via their own SAX collectors.
        self.product_codes = ProductCodes()
        self.billing_products = BillingProducts()
        # Device / virtualization details.
        self.block_device_mapping = None
        self.root_device_type = None
        self.root_device_name = None
        self.virtualization_type = None
        self.hypervisor = None
        self.instance_lifecycle = None
        self.sriov_net_support = None
def __repr__(self):
return 'Image:%s' % self.id
def startElement(self, name, attrs, connection):
retval = super(Image, self).startElement(name, attrs, connection)
if retval is not None:
return retval
if name == 'blockDeviceMapping':
self.block_device_mapping = BlockDeviceMapping()
return self.block_device_mapping
elif name == 'productCodes':
return self.product_codes
elif name == 'billingProducts':
return self.billing_products
else:
return None
    def endElement(self, name, value, connection):
        """Map a closing XML element from DescribeImages onto an attribute.

        :param name: the XML element name.
        :param value: the element's character data (a string).
        :param connection: the EC2 connection (unused here; part of the
            SAX-style parsing contract shared by boto EC2 objects).
        """
        if name == 'imageId':
            self.id = value
        elif name == 'imageLocation':
            self.location = value
        elif name == 'imageState':
            self.state = value
        elif name == 'imageOwnerId':
            self.ownerId = value  # for backwards compatibility
            self.owner_id = value
        elif name == 'isPublic':
            # EC2 serializes booleans as the strings 'true'/'false'; anything
            # else is treated as a malformed response.
            if value == 'false':
                self.is_public = False
            elif value == 'true':
                self.is_public = True
            else:
                raise Exception(
                    'Unexpected value of isPublic %s for image %s' % (
                        value,
                        self.id
                    )
                )
        elif name == 'architecture':
            self.architecture = value
        elif name == 'imageType':
            self.type = value
        elif name == 'kernelId':
            self.kernel_id = value
        elif name == 'ramdiskId':
            self.ramdisk_id = value
        elif name == 'imageOwnerAlias':
            self.owner_alias = value
        elif name == 'platform':
            self.platform = value
        elif name == 'name':
            self.name = value
        elif name == 'description':
            self.description = value
        elif name == 'rootDeviceType':
            self.root_device_type = value
        elif name == 'rootDeviceName':
            self.root_device_name = value
        elif name == 'virtualizationType':
            self.virtualization_type = value
        elif name == 'hypervisor':
            self.hypervisor = value
        elif name == 'instanceLifecycle':
            self.instance_lifecycle = value
        elif name == 'sriovNetSupport':
            self.sriov_net_support = value
        else:
            # Unrecognized elements are stored verbatim as attributes so new
            # response fields are not silently dropped.
            setattr(self, name, value)
def _update(self, updated):
self.__dict__.update(updated.__dict__)
    def update(self, validate=False, dry_run=False):
        """Update the image's state information by making a call to fetch
        the current image attributes from the service.

        :type validate: bool
        :param validate: By default, if EC2 returns no data about the
                         image the update method returns quietly.  If
                         the validate param is True, however, it will
                         raise a ValueError exception if no data is
                         returned from EC2.

        :type dry_run: bool
        :param dry_run: Set to True if the operation should not actually run.

        :return: The image's current state string (unchanged when EC2
                 returned no data and validate is False).
        """
        rs = self.connection.get_all_images([self.id], dry_run=dry_run)
        if len(rs) > 0:
            img = rs[0]
            # Only absorb the result if it actually describes this image.
            if img.id == self.id:
                self._update(img)
        elif validate:
            raise ValueError('%s is not a valid Image ID' % self.id)
        return self.state
def run(self, min_count=1, max_count=1, key_name=None,
        security_groups=None, user_data=None,
        addressing_type=None, instance_type='m1.small', placement=None,
        kernel_id=None, ramdisk_id=None,
        monitoring_enabled=False, subnet_id=None,
        block_device_map=None,
        disable_api_termination=False,
        instance_initiated_shutdown_behavior=None,
        private_ip_address=None,
        placement_group=None, security_group_ids=None,
        additional_info=None, instance_profile_name=None,
        instance_profile_arn=None, tenancy=None, dry_run=False):
    """
    Runs this instance.

    All parameters are forwarded unchanged to
    :meth:`connection.run_instances` along with this image's id.

    :type min_count: int
    :param min_count: The minimum number of instances to start

    :type max_count: int
    :param max_count: The maximum number of instances to start

    :type key_name: string
    :param key_name: The name of the key pair with which to
        launch instances.

    :type security_groups: list of strings
    :param security_groups: The names of the security groups with which to
        associate instances.

    :type user_data: string
    :param user_data: The Base64-encoded MIME user data to be made
        available to the instance(s) in this reservation.

    :type addressing_type: string
    :param addressing_type: Passed straight through to
        ``connection.run_instances`` (semantics defined by EC2).

    :type instance_type: string
    :param instance_type: The type of instance to run:

        * t1.micro
        * m1.small
        * m1.medium
        * m1.large
        * m1.xlarge
        * m3.medium
        * m3.large
        * m3.xlarge
        * m3.2xlarge
        * c1.medium
        * c1.xlarge
        * m2.xlarge
        * m2.2xlarge
        * m2.4xlarge
        * cr1.8xlarge
        * hi1.4xlarge
        * hs1.8xlarge
        * cc1.4xlarge
        * cg1.4xlarge
        * cc2.8xlarge
        * g2.2xlarge
        * c3.large
        * c3.xlarge
        * c3.2xlarge
        * c3.4xlarge
        * c3.8xlarge
        * i2.xlarge
        * i2.2xlarge
        * i2.4xlarge
        * i2.8xlarge
        * t2.micro
        * t2.small
        * t2.medium

    :type placement: string
    :param placement: The Availability Zone to launch the instance into.

    :type kernel_id: string
    :param kernel_id: The ID of the kernel with which to launch the
        instances.

    :type ramdisk_id: string
    :param ramdisk_id: The ID of the RAM disk with which to launch the
        instances.

    :type monitoring_enabled: bool
    :param monitoring_enabled: Enable CloudWatch monitoring on
        the instance.

    :type subnet_id: string
    :param subnet_id: The subnet ID within which to launch the instances
        for VPC.

    :type private_ip_address: string
    :param private_ip_address: If you're using VPC, you can
        optionally use this parameter to assign the instance a
        specific available IP address from the subnet (e.g.,
        10.0.0.25).

    :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
    :param block_device_map: A BlockDeviceMapping data structure
        describing the EBS volumes associated with the Image.

    :type disable_api_termination: bool
    :param disable_api_termination: If True, the instances will be locked
        and will not be able to be terminated via the API.

    :type instance_initiated_shutdown_behavior: string
    :param instance_initiated_shutdown_behavior: Specifies whether the
        instance stops or terminates on instance-initiated shutdown.
        Valid values are:

        * stop
        * terminate

    :type placement_group: string
    :param placement_group: If specified, this is the name of the placement
        group in which the instance(s) will be launched.

    :type additional_info: string
    :param additional_info: Specifies additional information to make
        available to the instance(s).

    :type security_group_ids: list of strings
    :param security_group_ids: The ID of the VPC security groups with
        which to associate instances.

    :type instance_profile_name: string
    :param instance_profile_name: The name of
        the IAM Instance Profile (IIP) to associate with the instances.

    :type instance_profile_arn: string
    :param instance_profile_arn: The Amazon resource name (ARN) of
        the IAM Instance Profile (IIP) to associate with the instances.

    :type tenancy: string
    :param tenancy: The tenancy of the instance you want to
        launch. An instance with a tenancy of 'dedicated' runs on
        single-tenant hardware and can only be launched into a
        VPC. Valid values are:"default" or "dedicated".
        NOTE: To use dedicated tenancy you MUST specify a VPC
        subnet-ID as well.

    :type dry_run: bool
    :param dry_run: Set to True if the operation should not actually run.

    :rtype: Reservation
    :return: The :class:`boto.ec2.instance.Reservation` associated with
        the request for machines
    """
    return self.connection.run_instances(self.id, min_count, max_count,
                                         key_name, security_groups,
                                         user_data, addressing_type,
                                         instance_type, placement,
                                         kernel_id, ramdisk_id,
                                         monitoring_enabled, subnet_id,
                                         block_device_map, disable_api_termination,
                                         instance_initiated_shutdown_behavior,
                                         private_ip_address, placement_group,
                                         security_group_ids=security_group_ids,
                                         additional_info=additional_info,
                                         instance_profile_name=instance_profile_name,
                                         instance_profile_arn=instance_profile_arn,
                                         tenancy=tenancy, dry_run=dry_run)
def deregister(self, delete_snapshot=False, dry_run=False):
    """Deregister this image, optionally deleting its backing snapshot."""
    return self.connection.deregister_image(
        self.id, delete_snapshot, dry_run=dry_run)
def get_launch_permissions(self, dry_run=False):
    """Return this image's launchPermission attribute as a dict."""
    response = self.connection.get_image_attribute(
        self.id, 'launchPermission', dry_run=dry_run)
    return response.attrs
def set_launch_permissions(self, user_ids=None, group_names=None,
                           dry_run=False):
    """Grant launch permission on this image to users and/or groups."""
    return self.connection.modify_image_attribute(
        self.id, 'launchPermission', 'add', user_ids, group_names,
        dry_run=dry_run)
def remove_launch_permissions(self, user_ids=None, group_names=None,
                              dry_run=False):
    """Revoke launch permission on this image from users and/or groups."""
    return self.connection.modify_image_attribute(
        self.id, 'launchPermission', 'remove', user_ids, group_names,
        dry_run=dry_run)
def reset_launch_attributes(self, dry_run=False):
    """Reset this image's launchPermission attribute to its default."""
    return self.connection.reset_image_attribute(
        self.id, 'launchPermission', dry_run=dry_run)
def get_kernel(self, dry_run=False):
    """Return the kernel id registered for this image."""
    response = self.connection.get_image_attribute(
        self.id, 'kernel', dry_run=dry_run)
    return response.kernel
def get_ramdisk(self, dry_run=False):
    """Return the ramdisk id registered for this image."""
    response = self.connection.get_image_attribute(
        self.id, 'ramdisk', dry_run=dry_run)
    return response.ramdisk
class ImageAttribute(object):
    """
    Holds the parsed result of an EC2 image-attribute query (e.g.
    launchPermission); populated by the SAX parser through
    startElement/endElement.
    """

    def __init__(self, parent=None):
        self.name = None
        self.kernel = None
        self.ramdisk = None
        # Bug fix: previously only assigned in endElement, so reading
        # image_id before (or without) parsing raised AttributeError.
        self.image_id = None
        self.attrs = {}

    def startElement(self, name, attrs, connection):
        # Delegate nested blockDeviceMapping elements to their own parser.
        if name == 'blockDeviceMapping':
            self.attrs['block_device_mapping'] = BlockDeviceMapping()
            return self.attrs['block_device_mapping']
        else:
            return None

    def endElement(self, name, value, connection):
        if name == 'launchPermission':
            self.name = 'launch_permission'
        elif name == 'group':
            self.attrs.setdefault('groups', []).append(value)
        elif name == 'userId':
            self.attrs.setdefault('user_ids', []).append(value)
        elif name == 'productCode':
            self.attrs.setdefault('product_codes', []).append(value)
        elif name == 'imageId':
            self.image_id = value
        elif name == 'kernel':
            self.kernel = value
        elif name == 'ramdisk':
            self.ramdisk = value
        else:
            # Unknown elements are stored verbatim.
            setattr(self, name, value)
class CopyImage(object):
    """Parsed result of a CopyImage request; records the new image's id."""

    def __init__(self, parent=None):
        self._parent = parent
        self.image_id = None

    def startElement(self, name, attrs, connection):
        # No nested elements need dedicated handlers.
        pass

    def endElement(self, name, value, connection):
        # Only the imageId element carries information we keep.
        if name == 'imageId':
            self.image_id = value
| mit |
miptliot/edx-platform | openedx/core/djangoapps/content/block_structure/tests/test_manager.py | 5 | 9624 | """
Tests for manager.py
"""
import ddt
from nose.plugins.attrib import attr
from unittest import TestCase
from ..block_structure import BlockStructureBlockData
from ..config import RAISE_ERROR_WHEN_NOT_FOUND, STORAGE_BACKING_FOR_CACHE, waffle
from ..exceptions import UsageKeyNotInBlockStructure, BlockStructureNotFound
from ..manager import BlockStructureManager
from ..transformers import BlockStructureTransformers
from .helpers import (
MockModulestoreFactory, MockCache, MockTransformer,
ChildrenMapTestMixin, UsageKeyFactoryMixin,
mock_registered_transformers,
)
class TestTransformer1(MockTransformer):
    """
    Test Transformer class with basic functionality to verify collected and
    transformed data.
    """
    collect_data_key = 't1.collect'
    transform_data_key = 't1.transform'
    collect_call_count = 0

    @classmethod
    def collect(cls, block_structure):
        """
        Collects block data for the block structure.
        """
        cls._set_block_values(block_structure, cls.collect_data_key)
        cls.collect_call_count += 1

    def transform(self, usage_info, block_structure):
        """
        Transforms the block structure.
        """
        self._set_block_values(block_structure, self.transform_data_key)

    @classmethod
    def assert_collected(cls, block_structure):
        """
        Asserts data was collected for the block structure.
        """
        cls._assert_block_values(block_structure, cls.collect_data_key)

    @classmethod
    def assert_transformed(cls, block_structure):
        """
        Asserts the block structure was transformed.
        """
        cls._assert_block_values(block_structure, cls.transform_data_key)

    @classmethod
    def _set_block_values(cls, block_structure, data_key):
        """
        Tags each block in the structure with a deterministic value
        under the given data key.
        """
        for key in block_structure.topological_traversal():
            value = cls._create_block_value(key, data_key)
            block_structure.set_transformer_block_field(key, cls, data_key, value)

    @classmethod
    def _assert_block_values(cls, block_structure, data_key):
        """
        Checks that each block carries the expected deterministic value
        under the given data key.
        """
        for key in block_structure.topological_traversal():
            expected = cls._create_block_value(key, data_key)
            actual = block_structure.get_transformer_block_field(key, cls, data_key)
            assert actual == expected

    @classmethod
    def _create_block_value(cls, block_key, data_key):
        """
        Returns a unique deterministic value for the given block key
        and data key.
        """
        return data_key + 't1.val1.' + unicode(block_key)
@attr(shard=2)
@ddt.ddt
class TestBlockStructureManager(UsageKeyFactoryMixin, ChildrenMapTestMixin, TestCase):
    """
    Test class for BlockStructureManager.
    """

    def setUp(self):
        # Reset the class-level counter so each test starts from zero.
        super(TestBlockStructureManager, self).setUp()
        TestTransformer1.collect_call_count = 0
        self.registered_transformers = [TestTransformer1()]
        with mock_registered_transformers(self.registered_transformers):
            self.transformers = BlockStructureTransformers(self.registered_transformers)
        self.children_map = self.SIMPLE_CHILDREN_MAP
        self.modulestore = MockModulestoreFactory.create(self.children_map, self.block_key_factory)
        self.cache = MockCache()
        self.bs_manager = BlockStructureManager(self.block_key_factory(0), self.modulestore, self.cache)

    def collect_and_verify(self, expect_modulestore_called, expect_cache_updated):
        """
        Calls the manager's get_collected method and verifies its result
        and behavior.
        """
        # Zero the call counters so the expectations below are per-call.
        self.modulestore.get_items_call_count = 0
        self.cache.set_call_count = 0
        with mock_registered_transformers(self.registered_transformers):
            block_structure = self.bs_manager.get_collected()
        self.assert_block_structure(block_structure, self.children_map)
        TestTransformer1.assert_collected(block_structure)
        if expect_modulestore_called:
            self.assertGreater(self.modulestore.get_items_call_count, 0)
        else:
            self.assertEquals(self.modulestore.get_items_call_count, 0)
        self.assertEquals(self.cache.set_call_count, 1 if expect_cache_updated else 0)

    def test_get_transformed(self):
        # Transforming should both collect and apply the transformer.
        with mock_registered_transformers(self.registered_transformers):
            block_structure = self.bs_manager.get_transformed(self.transformers)
        self.assert_block_structure(block_structure, self.children_map)
        TestTransformer1.assert_collected(block_structure)
        TestTransformer1.assert_transformed(block_structure)

    def test_get_transformed_with_starting_block(self):
        # Starting from block 1 should drop blocks 0 and 2 from the result.
        with mock_registered_transformers(self.registered_transformers):
            block_structure = self.bs_manager.get_transformed(
                self.transformers,
                starting_block_usage_key=self.block_key_factory(1),
            )
        substructure_of_children_map = [[], [3, 4], [], [], []]
        self.assert_block_structure(block_structure, substructure_of_children_map, missing_blocks=[0, 2])
        TestTransformer1.assert_collected(block_structure)
        TestTransformer1.assert_transformed(block_structure)

    def test_get_transformed_with_collected(self):
        with mock_registered_transformers(self.registered_transformers):
            collected_block_structure = self.bs_manager.get_collected()
        # using the same collected block structure,
        # transform at different starting blocks
        for (starting_block, expected_structure, expected_missing_blocks) in [
            (0, [[1, 2], [3, 4], [], [], []], []),
            (1, [[], [3, 4], [], [], []], [0, 2]),
            (2, [[], [], [], [], []], [0, 1, 3, 4]),
        ]:
            block_structure = self.bs_manager.get_transformed(
                self.transformers,
                starting_block_usage_key=self.block_key_factory(starting_block),
                collected_block_structure=collected_block_structure,
            )
            self.assert_block_structure(block_structure, expected_structure, missing_blocks=expected_missing_blocks)

    def test_get_transformed_with_nonexistent_starting_block(self):
        # A starting key that isn't in the structure should raise.
        with mock_registered_transformers(self.registered_transformers):
            with self.assertRaises(UsageKeyNotInBlockStructure):
                self.bs_manager.get_transformed(self.transformers, starting_block_usage_key=100)

    def test_get_collected_cached(self):
        # Second call should be served from cache: no modulestore hit,
        # no cache write, and no second collect.
        self.collect_and_verify(expect_modulestore_called=True, expect_cache_updated=True)
        self.collect_and_verify(expect_modulestore_called=False, expect_cache_updated=False)
        self.assertEquals(TestTransformer1.collect_call_count, 1)

    def test_get_collected_error_raised(self):
        # With the flag active, a cache miss raises instead of collecting.
        with waffle().override(RAISE_ERROR_WHEN_NOT_FOUND, active=True):
            with mock_registered_transformers(self.registered_transformers):
                with self.assertRaises(BlockStructureNotFound):
                    self.bs_manager.get_collected()

    @ddt.data(True, False)
    def test_update_collected_if_needed(self, with_storage_backing):
        with waffle().override(STORAGE_BACKING_FOR_CACHE, active=with_storage_backing):
            with mock_registered_transformers(self.registered_transformers):
                self.assertEquals(TestTransformer1.collect_call_count, 0)
                self.bs_manager.update_collected_if_needed()
                self.assertEquals(TestTransformer1.collect_call_count, 1)
                # With storage backing, the second update is a no-op;
                # without it, the data is re-collected.
                self.bs_manager.update_collected_if_needed()
                self.assertEquals(TestTransformer1.collect_call_count, 1 if with_storage_backing else 2)
                self.collect_and_verify(expect_modulestore_called=False, expect_cache_updated=False)

    def test_get_collected_transformer_version(self):
        self.collect_and_verify(expect_modulestore_called=True, expect_cache_updated=True)
        # transformer code writes new schema version; data not re-collected
        TestTransformer1.WRITE_VERSION += 1
        self.collect_and_verify(expect_modulestore_called=False, expect_cache_updated=False)
        # transformer code requires new schema version; data re-collected
        TestTransformer1.READ_VERSION += 1
        self.collect_and_verify(expect_modulestore_called=True, expect_cache_updated=True)
        # old transformer code can read new schema version; data not re-collected
        TestTransformer1.READ_VERSION -= 1
        self.collect_and_verify(expect_modulestore_called=False, expect_cache_updated=False)
        self.assertEquals(TestTransformer1.collect_call_count, 2)

    def test_get_collected_structure_version(self):
        # Bumping the structure schema version invalidates the cache entry.
        self.collect_and_verify(expect_modulestore_called=True, expect_cache_updated=True)
        BlockStructureBlockData.VERSION += 1
        self.collect_and_verify(expect_modulestore_called=True, expect_cache_updated=True)
        self.assertEquals(TestTransformer1.collect_call_count, 2)

    def test_clear(self):
        # Clearing forces a full re-collection on the next access.
        self.collect_and_verify(expect_modulestore_called=True, expect_cache_updated=True)
        self.bs_manager.clear()
        self.collect_and_verify(expect_modulestore_called=True, expect_cache_updated=True)
        self.assertEquals(TestTransformer1.collect_call_count, 2)
| agpl-3.0 |
AICP/external_chromium_org | tools/cr/cr/actions/gyp.py | 59 | 1257 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to add gyp support to cr."""
import cr
import os
# Prefix identifying exported context variables that should be collapsed
# into the GYP_DEFINES environment variable (see GypPrepareOut below).
GYP_DEFINE_PREFIX = 'GYP_DEF_'
class GypPrepareOut(cr.PrepareOut):
  """A prepare action that runs gyp whenever you select an output directory."""

  # Default configuration: generate ninja build files into the selected
  # output directory and forward the target architecture as a GYP define.
  ENABLED = cr.Config.From(
      GYP_GENERATORS='ninja',
      GYP_GENERATOR_FLAGS='output_dir={CR_OUT_BASE} config={CR_BUILDTYPE}',
      GYP_DEF_target_arch='{CR_ENVSETUP_ARCH}',
  )

  def UpdateContext(self):
    """Folds all exported GYP_DEF_* variables into GYP_DEFINES."""
    # Collapse GYP_DEFINES from all GYP_DEF prefixes
    gyp_defines = cr.context.Find('GYP_DEFINES') or ''
    for key, value in cr.context.exported.items():
      if key.startswith(GYP_DEFINE_PREFIX):
        gyp_defines += ' %s=%s' % (key[len(GYP_DEFINE_PREFIX):], value)
    cr.context['GYP_DEFINES'] = gyp_defines.strip()
    if cr.context.verbose >= 1:
      print cr.context.Substitute('GYP_DEFINES = {GYP_DEFINES}')

  def Prepare(self):
    """Invokes gyp_chromium to (re)generate the build files."""
    if cr.context.verbose >= 1:
      print cr.context.Substitute('Invoking gyp with {GYP_GENERATOR_FLAGS}')
    cr.Host.Execute(
        '{CR_SRC}/build/gyp_chromium',
        '--depth={CR_SRC}',
        '--check'
    )
| bsd-3-clause |
Peddle/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/gdal/prototypes/srs.py | 219 | 3498 | from ctypes import c_char_p, c_int, c_void_p, POINTER
from django.contrib.gis.gdal.libgdal import lgdal, std_call
from django.contrib.gis.gdal.prototypes.generation import (const_string_output,
double_output, int_output, srs_output, string_output, void_output)
## Shortcut generation for routines with known parameters.
def srs_double(f):
    """
    Creates a function prototype for the OSR routines that take
    the OSRSpatialReference object and return a double value,
    reporting errors through an int pointer argument.
    """
    return double_output(f, [c_void_p, POINTER(c_int)], errcheck=True)
def units_func(f):
    """
    Creates a ctypes function prototype for OSR units functions, e.g.,
    OSRGetAngularUnits, OSRGetLinearUnits.

    The resulting prototype returns the unit magnitude as a double and
    takes a string-pointer argument for the unit name.
    """
    return double_output(f, [c_void_p, POINTER(c_char_p)], strarg=True)
# Creation & destruction.
clone_srs = srs_output(std_call('OSRClone'), [c_void_p])
new_srs = srs_output(std_call('OSRNewSpatialReference'), [c_char_p])
release_srs = void_output(lgdal.OSRRelease, [c_void_p], errcheck=False)
destroy_srs = void_output(std_call('OSRDestroySpatialReference'), [c_void_p], errcheck=False)
srs_validate = void_output(lgdal.OSRValidate, [c_void_p])

# Getting the semi_major, semi_minor, and flattening functions.
semi_major = srs_double(lgdal.OSRGetSemiMajor)
semi_minor = srs_double(lgdal.OSRGetSemiMinor)
invflattening = srs_double(lgdal.OSRGetInvFlattening)

# WKT, PROJ, EPSG, XML importation routines.
from_wkt = void_output(lgdal.OSRImportFromWkt, [c_void_p, POINTER(c_char_p)])
from_proj = void_output(lgdal.OSRImportFromProj4, [c_void_p, c_char_p])
from_epsg = void_output(std_call('OSRImportFromEPSG'), [c_void_p, c_int])
from_xml = void_output(lgdal.OSRImportFromXML, [c_void_p, c_char_p])
from_user_input = void_output(std_call('OSRSetFromUserInput'), [c_void_p, c_char_p])

# Morphing to/from ESRI WKT.
morph_to_esri = void_output(lgdal.OSRMorphToESRI, [c_void_p])
morph_from_esri = void_output(lgdal.OSRMorphFromESRI, [c_void_p])

# Identifying the EPSG code of the spatial reference, where possible.
identify_epsg = void_output(lgdal.OSRAutoIdentifyEPSG, [c_void_p])

# Getting the angular_units, linear_units functions
linear_units = units_func(lgdal.OSRGetLinearUnits)
angular_units = units_func(lgdal.OSRGetAngularUnits)

# For exporting to WKT, PROJ.4, "Pretty" WKT, and XML.
to_wkt = string_output(std_call('OSRExportToWkt'), [c_void_p, POINTER(c_char_p)], decoding='ascii')
to_proj = string_output(std_call('OSRExportToProj4'), [c_void_p, POINTER(c_char_p)], decoding='ascii')
to_pretty_wkt = string_output(std_call('OSRExportToPrettyWkt'), [c_void_p, POINTER(c_char_p), c_int], offset=-2, decoding='ascii')
# Memory leak fixed in GDAL 1.5; still exists in 1.4.
to_xml = string_output(lgdal.OSRExportToXML, [c_void_p, POINTER(c_char_p), c_char_p], offset=-2, decoding='ascii')

# String attribute retrieval routines.
get_attr_value = const_string_output(std_call('OSRGetAttrValue'), [c_void_p, c_char_p, c_int], decoding='ascii')
get_auth_name = const_string_output(lgdal.OSRGetAuthorityName, [c_void_p, c_char_p], decoding='ascii')
get_auth_code = const_string_output(lgdal.OSRGetAuthorityCode, [c_void_p, c_char_p], decoding='ascii')

# SRS Properties
isgeographic = int_output(lgdal.OSRIsGeographic, [c_void_p])
islocal = int_output(lgdal.OSRIsLocal, [c_void_p])
isprojected = int_output(lgdal.OSRIsProjected, [c_void_p])

# Coordinate transformation
new_ct = srs_output(std_call('OCTNewCoordinateTransformation'), [c_void_p, c_void_p])
destroy_ct = void_output(std_call('OCTDestroyCoordinateTransformation'), [c_void_p], errcheck=False)
naturalness/sensibility | tests/strategies.py | 1 | 1264 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright 2017 Eddie Antonio Santos <easantos@ualberta.ca>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hypothesis.strategies import lists, integers, composite # type: ignore
from sensibility.source_vector import SourceVector
from sensibility.language import language
@composite
def programs(draw):
    """
    Generate SourceVector instances with random sequences of vectors.

    TODO: rename to source_vectors()? Ew, kinda gross.
    """
    vocab = language.vocabulary
    token_indices = integers(
        min_value=vocab.minimum_representable_index(),
        max_value=vocab.maximum_representable_index(),
    )
    drawn = draw(lists(token_indices, min_size=1))
    return SourceVector(drawn)
| apache-2.0 |
pokerregion/poker | tests/handhistory/test_stars.py | 1 | 23153 | from decimal import Decimal
from datetime import datetime
import pytz
import pytest
from poker.card import Card
from poker.hand import Combo
from poker.constants import Currency, GameType, Game, Limit, Action, MoneyType
from poker.handhistory import _Player, _PlayerAction
from poker.room.pokerstars import PokerStarsHandHistory, _Street
from . import stars_hands
ET = pytz.timezone("US/Eastern")
@pytest.fixture
def hand(request):
    """Parse the hand history from the test class's hand_text attribute
    and return a fully parsed PokerStarsHandHistory instance.
    """
    history = PokerStarsHandHistory(request.instance.hand_text)
    history.parse()
    return history
@pytest.fixture
def hand_header(request):
    """Parse only the header of the hand history in hand_text
    and return a PokerStarsHandHistory instance.
    """
    history = PokerStarsHandHistory(request.instance.hand_text)
    history.parse_header()
    return history
@pytest.fixture(scope="module")
def flop():
    # Canned flop street: board [2s 6d 6h], a bet that goes uncalled,
    # and the pot awarded to the bettor without a showdown.
    return _Street(
        [
            "[2s 6d 6h]",
            "W2lkm2n: bets 80",
            "MISTRPerfect: folds",
            "Uncalled bet (80) returned to W2lkm2n",
            "W2lkm2n collected 150 from pot",
            "W2lkm2n: doesn't show hand",
        ],
        0,
    )
def test_open_from_file(testdir):
    """from_file should read a hand history file and parse it correctly."""
    path = str(testdir.joinpath("handhistory/bbb.txt"))
    history = PokerStarsHandHistory.from_file(path)
    history.parse()
    assert history.ident == "138364355489"
    assert type(history.raw) is str
class TestHandHeaderNoLimitHoldemTourFreeroll:
    """Header parsing for a freeroll NLHE tournament hand (zero buyin/rake)."""

    hand_text = """
PokerStars Hand #152455023342: Tournament #1545783901, Freeroll Hold'em No Limit - Level I (10/20) - 2016/04/25 23:22:00 BRT [2016/04/25 22:22:00 ET]
"""  # noqa

    @pytest.mark.parametrize(
        ("attribute", "expected_value"),
        [
            ("ident", "152455023342"),
            ("game_type", GameType.TOUR),
            ("tournament_ident", "1545783901"),
            ("tournament_level", "I"),
            ("currency", Currency("USD")),
            ("buyin", Decimal("0")),
            ("rake", Decimal("0")),
            ("game", Game.HOLDEM),
            ("limit", Limit.NL),
            ("sb", Decimal(10)),
            ("bb", Decimal(20)),
            ("date", ET.localize(datetime(2016, 4, 25, 22, 22, 0))),
            ("extra", {"money_type": MoneyType.REAL}),
        ],
    )
    def test_values_after_header_parsed(self, hand_header, attribute, expected_value):
        assert getattr(hand_header, attribute) == expected_value
class TestHandHeaderNoLimitHoldemTourPlayMoney:
    """Header parsing for a play-money NLHE tournament hand (no currency)."""

    hand_text = """
PokerStars Hand #152504147861: Tournament #1545751329, 870+130 Hold'em No Limit - Level I (10/20) - 2016/04/27 1:17:16 BRT [2016/04/27 0:17:16 ET]
"""  # noqa

    @pytest.mark.parametrize(
        ("attribute", "expected_value"),
        [
            ("ident", "152504147861"),
            ("game_type", GameType.TOUR),
            ("tournament_ident", "1545751329"),
            ("tournament_level", "I"),
            ("currency", None),
            ("buyin", Decimal("870")),
            ("rake", Decimal("130")),
            ("game", Game.HOLDEM),
            ("limit", Limit.NL),
            ("sb", Decimal(10)),
            ("bb", Decimal(20)),
            ("date", ET.localize(datetime(2016, 4, 27, 00, 17, 16))),
            ("extra", {"money_type": MoneyType.PLAY}),
        ],
    )
    def test_values_after_header_parsed(self, hand_header, attribute, expected_value):
        assert getattr(hand_header, attribute) == expected_value
class TestHandHeaderLimitHoldemCashPlayMoney:
    """Header parsing for a play-money fixed-limit hold'em cash game hand."""

    hand_text = """
PokerStars Hand #153769972916: Hold'em Limit (10/20) - 2016/05/24 8:52:39 BRT [2016/05/24 7:52:39 ET]
"""  # noqa

    @pytest.mark.parametrize(
        ("attribute", "expected_value"),
        [
            ("ident", "153769972916"),
            ("game_type", GameType.CASH),
            ("tournament_ident", None),
            ("tournament_level", None),
            ("currency", None),
            ("buyin", None),
            ("rake", None),
            ("game", Game.HOLDEM),
            ("limit", Limit.FL),
            ("sb", Decimal(10)),
            ("bb", Decimal(20)),
            ("date", ET.localize(datetime(2016, 5, 24, 7, 52, 39))),
            ("extra", {"money_type": MoneyType.PLAY}),
        ],
    )
    def test_values_after_header_parsed(self, hand_header, attribute, expected_value):
        assert getattr(hand_header, attribute) == expected_value
class TestHandHeaderNoLimitHoldemTourStarcoin:
    """Header parsing for a tournament hand bought in with StarsCoin."""

    hand_text = """
PokerStars Hand #153719873192: Tournament #1573768726, 184 SC Hold'em No Limit - Level I (25/50) - 2016/05/23 6:48:22 BRT [2016/05/23 5:48:22 ET]
"""  # noqa

    @pytest.mark.parametrize(
        ("attribute", "expected_value"),
        [
            ("ident", "153719873192"),
            ("game_type", GameType.TOUR),
            ("tournament_ident", "1573768726"),
            ("tournament_level", "I"),
            ("currency", Currency.STARS_COIN),
            ("buyin", Decimal(184)),
            ("rake", Decimal(0)),
            ("game", Game.HOLDEM),
            ("limit", Limit.NL),
            ("sb", Decimal(25)),
            ("bb", Decimal(50)),
            ("date", ET.localize(datetime(2016, 5, 23, 5, 48, 22))),
            ("extra", {"money_type": MoneyType.REAL}),
        ],
    )
    def test_values_after_header_parsed(self, hand_header, attribute, expected_value):
        assert getattr(hand_header, attribute) == expected_value
class TestHandHeaderPotLimitOmahaCash:
    """Header parsing for a real-money pot-limit Omaha cash game hand."""

    hand_text = """
PokerStars Hand #107030112846: Omaha Pot Limit ($0.01/$0.02 USD) - 2013/11/15 9:03:10 AWST [2013/11/14 20:03:10 ET]
"""  # noqa

    @pytest.mark.parametrize(
        ("attribute", "expected_value"),
        [
            ("ident", "107030112846"),
            ("game_type", GameType.CASH),
            ("tournament_ident", None),
            ("tournament_level", None),
            ("currency", Currency.USD),
            ("buyin", None),
            ("rake", None),
            ("game", Game.OMAHA),
            ("limit", Limit.PL),
            ("sb", Decimal("0.01")),
            ("bb", Decimal("0.02")),
            ("date", ET.localize(datetime(2013, 11, 14, 20, 3, 10))),
            ("extra", {"money_type": MoneyType.REAL}),
        ],
    )
    def test_values_after_header_parsed(self, hand_header, attribute, expected_value):
        assert getattr(hand_header, attribute) == expected_value
class TestHandWithFlopOnly:
    """Full parse of a tournament hand that ends on the flop (stars_hands.HAND1):
    everyone folds to a flop bet, so there is no turn, river or showdown."""

    hand_text = stars_hands.HAND1

    # in py.test 2.4 it is recommended to use string like "attribute,expected",
    # but with tuple, it works in both 2.3.5 and 2.4
    @pytest.mark.parametrize(
        ("attribute", "expected_value"),
        [
            ("ident", "105024000105"),
            ("game_type", GameType.TOUR),
            ("tournament_ident", "797469411"),
            ("tournament_level", "I"),
            ("currency", Currency.USD),
            ("buyin", Decimal("3.19")),
            ("rake", Decimal("0.31")),
            ("game", Game.HOLDEM),
            ("limit", Limit.NL),
            ("sb", Decimal(10)),
            ("bb", Decimal(20)),
            ("date", ET.localize(datetime(2013, 10, 4, 13, 53, 27))),
        ],
    )
    def test_values_after_header_parsed(self, hand_header, attribute, expected_value):
        assert getattr(hand_header, attribute) == expected_value

    @pytest.mark.parametrize(
        ("attribute", "expected_value"),
        [
            ("table_name", "797469411 15"),
            ("max_players", 9),
            ("button", _Player(name="flettl2", stack=1500, seat=1, combo=None)),
            ("hero", _Player(name="W2lkm2n", stack=3000, seat=5, combo=Combo("AcJh"))),
            (
                "players",
                [
                    _Player(name="flettl2", stack=1500, seat=1, combo=None),
                    _Player(name="santy312", stack=3000, seat=2, combo=None),
                    _Player(name="flavio766", stack=3000, seat=3, combo=None),
                    _Player(name="strongi82", stack=3000, seat=4, combo=None),
                    _Player(name="W2lkm2n", stack=3000, seat=5, combo=Combo("AcJh")),
                    _Player(name="MISTRPerfect", stack=3000, seat=6, combo=None),
                    _Player(name="blak_douglas", stack=3000, seat=7, combo=None),
                    _Player(name="sinus91", stack=1500, seat=8, combo=None),
                    _Player(name="STBIJUJA", stack=1500, seat=9, combo=None),
                ],
            ),
            ("turn", None),
            ("river", None),
            ("board", (Card("2s"), Card("6d"), Card("6h"))),
            (
                "preflop_actions",
                (
                    "strongi82: folds",
                    "W2lkm2n: raises 40 to 60",
                    "MISTRPerfect: calls 60",
                    "blak_douglas: folds",
                    "sinus91: folds",
                    "STBIJUJA: folds",
                    "flettl2: folds",
                    "santy312: folds",
                    "flavio766: folds",
                ),
            ),
            ("turn_actions", None),
            ("river_actions", None),
            ("total_pot", Decimal(150)),
            ("show_down", False),
            ("winners", ("W2lkm2n",)),
        ],
    )
    def test_body(self, hand, attribute, expected_value):
        assert getattr(hand, attribute) == expected_value

    @pytest.mark.parametrize(
        ("attribute", "expected_value"),
        [
            (
                "actions",
                (
                    _PlayerAction("W2lkm2n", Action.BET, Decimal(80)),
                    _PlayerAction("MISTRPerfect", Action.FOLD, None),
                    _PlayerAction("W2lkm2n", Action.RETURN, Decimal(80)),
                    _PlayerAction("W2lkm2n", Action.WIN, Decimal(150)),
                    _PlayerAction("W2lkm2n", Action.MUCK, None),
                ),
            ),
            ("cards", (Card("2s"), Card("6d"), Card("6h"))),
            ("is_rainbow", True),
            ("is_monotone", False),
            ("is_triplet", False),
            # TODO: http://www.pokerology.com/lessons/flop-texture/
            # assert flop.is_dry
            ("has_pair", True),
            ("has_straightdraw", False),
            ("has_gutshot", True),
            ("has_flushdraw", False),
            ("players", ("W2lkm2n", "MISTRPerfect")),
            ("pot", Decimal(150)),
        ],
    )
    def test_flop_attributes(self, hand, attribute, expected_value):
        assert getattr(hand.flop, attribute) == expected_value

    def test_flop(self, hand):
        assert isinstance(hand.flop, _Street)
class TestAllinPreflopHand:
    """Full parse of a tournament hand (stars_hands.HAND2) where all the money
    goes in preflop, so the board runs out with no street actions."""

    hand_text = stars_hands.HAND2

    @pytest.mark.parametrize(
        ("attribute", "expected_value"),
        [
            ("ident", "105034215446"),
            ("game_type", GameType.TOUR),
            ("tournament_ident", "797536898"),
            ("tournament_level", "XI"),
            ("currency", Currency.USD),
            ("buyin", Decimal("3.19")),
            ("rake", Decimal("0.31")),
            ("game", Game.HOLDEM),
            ("limit", Limit.NL),
            ("sb", Decimal(400)),
            ("bb", Decimal(800)),
            ("date", ET.localize(datetime(2013, 10, 4, 17, 22, 20))),
        ],
    )
    def test_values_after_header_parsed(self, hand_header, attribute, expected_value):
        assert getattr(hand_header, attribute) == expected_value

    @pytest.mark.parametrize(
        ("attribute", "expected_value"),
        [
            ("table_name", "797536898 9"),
            ("max_players", 9),
            (
                "button",
                _Player(name="W2lkm2n", stack=11815, seat=2, combo=Combo("JdJs")),
            ),
            ("hero", _Player(name="W2lkm2n", stack=11815, seat=2, combo=Combo("JdJs"))),
            (
                "players",
                [
                    _Player(name="RichFatWhale", stack=12910, seat=1, combo=None),
                    _Player(name="W2lkm2n", stack=11815, seat=2, combo=Combo("JdJs")),
                    _Player(name="Labahra", stack=7395, seat=3, combo=None),
                    _Player(name="Lean Abadia", stack=7765, seat=4, combo=None),
                    _Player(name="lkenny44", stack=10080, seat=5, combo=None),
                    _Player(name="Newfie_187", stack=1030, seat=6, combo=None),
                    _Player(name="Hokolix", stack=13175, seat=7, combo=None),
                    _Player(name="pmmr", stack=2415, seat=8, combo=None),
                    _Player(name="costamar", stack=13070, seat=9, combo=None),
                ],
            ),
            ("turn", Card("8d")),
            ("river", Card("Ks")),
            ("board", (Card("3c"), Card("6s"), Card("9d"), Card("8d"), Card("Ks"))),
            (
                "preflop_actions",
                (
                    "lkenny44: folds",
                    "Newfie_187: raises 155 to 955 and is all-in",
                    "Hokolix: folds",
                    "pmmr: folds",
                    "costamar: raises 12040 to 12995 and is all-in",
                    "RichFatWhale: folds",
                    "W2lkm2n: calls 11740 and is all-in",
                    "Labahra: folds",
                    "Lean Abadia: folds",
                    "Uncalled bet (1255) returned to costamar",
                ),
            ),
            ("turn_actions", None),
            ("river_actions", None),
            ("total_pot", Decimal(26310)),
            ("show_down", True),
            ("winners", ("costamar",)),
        ],
    )
    def test_body(self, hand, attribute, expected_value):
        assert getattr(hand, attribute) == expected_value

    @pytest.mark.parametrize(
        ("attribute", "expected_value"),
        [
            ("actions", None),
            ("cards", (Card("3c"), Card("6s"), Card("9d"))),
            ("is_rainbow", True),
            ("is_monotone", False),
            ("is_triplet", False),
            # TODO: http://www.pokerology.com/lessons/flop-texture/
            # assert flop.is_dry
            ("has_pair", False),
            ("has_straightdraw", True),
            ("has_gutshot", True),
            ("has_flushdraw", False),
            ("players", None),
        ],
    )
    def test_flop_attributes(self, hand, attribute, expected_value):
        assert getattr(hand.flop, attribute) == expected_value

    def test_flop(self, hand):
        assert isinstance(hand.flop, _Street)

    @pytest.mark.xfail
    def test_flop_pot(self, hand):
        # Known gap: the preflop all-in pot is not propagated to the flop street.
        assert hand.flop.pot == Decimal(26310)
class TestBodyMissingPlayerNoBoard:
    """Hand that folds out preflop: an empty seat, no board, no showdown."""
    # Raw hand-history fixture parsed by the ``hand``/``hand_header`` fixtures.
    hand_text = stars_hands.HAND3
    @pytest.mark.parametrize(
        ("attribute", "expected_value"),
        [
            ("ident", "105026771696"),
            ("game_type", GameType.TOUR),
            ("tournament_ident", "797469411"),
            ("tournament_level", "X"),
            ("currency", Currency.USD),
            ("buyin", Decimal("3.19")),
            ("rake", Decimal("0.31")),
            ("game", Game.HOLDEM),
            ("limit", Limit.NL),
            ("sb", Decimal(300)),
            ("bb", Decimal(600)),
            ("date", ET.localize(datetime(2013, 10, 4, 14, 50, 56))),
        ],
    )
    def test_values_after_header_parsed(self, hand_header, attribute, expected_value):
        """Each parsed header attribute equals its recorded expected value."""
        assert getattr(hand_header, attribute) == expected_value
    @pytest.mark.parametrize(
        ("attribute", "expected_value"),
        [
            ("table_name", "797469411 11"),
            ("max_players", 9),
            (
                "button",
                _Player(name="W2lkm2n", stack=10714, seat=8, combo=Combo("6d8d")),
            ),
            ("hero", _Player(name="W2lkm2n", stack=10714, seat=8, combo=Combo("6d8d"))),
            (
                "players",
                [
                    _Player(name="Empty Seat 1", stack=0, seat=1, combo=None),
                    _Player(name="snelle_jel", stack=4295, seat=2, combo=None),
                    _Player(name="EuSh0wTelm0", stack=11501, seat=3, combo=None),
                    _Player(name="panost3", stack=7014, seat=4, combo=None),
                    _Player(name="Samovlyblen", stack=7620, seat=5, combo=None),
                    _Player(name="Theralion", stack=4378, seat=6, combo=None),
                    _Player(name="wrsport1015", stack=9880, seat=7, combo=None),
                    _Player(name="W2lkm2n", stack=10714, seat=8, combo=Combo("6d8d")),
                    _Player(name="fischero68", stack=8724, seat=9, combo=None),
                ],
            ),
            ("turn", None),
            ("river", None),
            ("board", None),
            (
                "preflop_actions",
                (
                    "EuSh0wTelm0: folds",
                    "panost3: folds",
                    "Samovlyblen: folds",
                    "Theralion: raises 600 to 1200",
                    "wrsport1015: folds",
                    "W2lkm2n: folds",
                    "fischero68: folds",
                    "snelle_jel: folds",
                    "Uncalled bet (600) returned to Theralion",
                    "Theralion collected 1900 from pot",
                    "Theralion: doesn't show hand",
                ),
            ),
            ("turn_actions", None),
            ("river_actions", None),
            ("total_pot", Decimal(1900)),
            ("show_down", False),
            ("winners", ("Theralion",)),
        ],
    )
    def test_body(self, hand, attribute, expected_value):
        """Every parsed body attribute matches the recorded hand history."""
        assert getattr(hand, attribute) == expected_value
    def test_flop(self, hand):
        """No flop was dealt, so the parsed hand has no flop street."""
        assert hand.flop is None
class TestBodyEveryStreet:
    """Hand that reaches every street (flop, turn, river) without showdown."""
    # Raw hand-history fixture parsed by the ``hand``/``hand_header`` fixtures.
    hand_text = stars_hands.HAND4
    @pytest.mark.parametrize(
        ("attribute", "expected_value"),
        [
            ("ident", "105025168298"),
            ("game_type", GameType.TOUR),
            ("tournament_ident", "797469411"),
            ("tournament_level", "IV"),
            ("currency", Currency.USD),
            ("buyin", Decimal("3.19")),
            ("rake", Decimal("0.31")),
            ("game", Game.HOLDEM),
            ("limit", Limit.NL),
            ("sb", Decimal(50)),
            ("bb", Decimal(100)),
            ("date", ET.localize(datetime(2013, 10, 4, 14, 19, 17))),
        ],
    )
    def test_values_after_header_parsed(self, hand_header, attribute, expected_value):
        """Each parsed header attribute equals its recorded expected value."""
        assert getattr(hand_header, attribute) == expected_value
    @pytest.mark.parametrize(
        ("attribute", "expected_value"),
        [
            ("table_name", "797469411 15"),
            ("max_players", 9),
            (
                "button",
                _Player(name="W2lkm2n", stack=5145, seat=5, combo=Combo("Jc5c")),
            ),
            ("hero", _Player(name="W2lkm2n", stack=5145, seat=5, combo=Combo("Jc5c"))),
            (
                "players",
                [
                    _Player(name="flettl2", stack=3000, seat=1, combo=None),
                    _Player(name="santy312", stack=5890, seat=2, combo=None),
                    _Player(name="flavio766", stack=11010, seat=3, combo=None),
                    _Player(name="strongi82", stack=2855, seat=4, combo=None),
                    _Player(name="W2lkm2n", stack=5145, seat=5, combo=Combo("Jc5c")),
                    _Player(name="MISTRPerfect", stack=2395, seat=6, combo=None),
                    _Player(name="blak_douglas", stack=3000, seat=7, combo=None),
                    _Player(name="sinus91", stack=3000, seat=8, combo=None),
                    _Player(name="STBIJUJA", stack=1205, seat=9, combo=None),
                ],
            ),
            ("turn", Card("8c")),
            ("river", Card("Kd")),
            ("board", (Card("6s"), Card("4d"), Card("3s"), Card("8c"), Card("Kd"))),
            (
                "preflop_actions",
                (
                    "sinus91: folds",
                    "STBIJUJA: folds",
                    "flettl2: raises 125 to 225",
                    "santy312: folds",
                    "flavio766: folds",
                    "strongi82: folds",
                    "W2lkm2n: folds",
                    "MISTRPerfect: folds",
                    "blak_douglas: calls 125",
                ),
            ),
            (
                "turn_actions",
                (
                    "blak_douglas: checks",
                    "flettl2: bets 250",
                    "blak_douglas: calls 250",
                ),
            ),
            (
                "river_actions",
                (
                    "blak_douglas: checks",
                    "flettl2: bets 1300",
                    "blak_douglas: folds",
                    "Uncalled bet (1300) returned to flettl2",
                    "flettl2 collected 1300 from pot",
                    "flettl2: doesn't show hand",
                ),
            ),
            ("total_pot", Decimal(1300)),
            ("show_down", False),
            ("winners", ("flettl2",)),
        ],
    )
    def test_body(self, hand, attribute, expected_value):
        """Every parsed body attribute matches the recorded hand history."""
        assert getattr(hand, attribute) == expected_value
    @pytest.mark.parametrize(
        ("attribute", "expected_value"),
        [
            (
                "actions",
                (
                    _PlayerAction("blak_douglas", Action.CHECK, None),
                    _PlayerAction("flettl2", Action.BET, Decimal(150)),
                    _PlayerAction("blak_douglas", Action.CALL, Decimal(150)),
                ),
            ),
            ("cards", (Card("6s"), Card("4d"), Card("3s"))),
            ("is_rainbow", False),
            ("is_monotone", False),
            ("is_triplet", False),
            # TODO: http://www.pokerology.com/lessons/flop-texture/
            # assert flop.is_dry
            ("has_pair", False),
            ("has_straightdraw", True),
            ("has_gutshot", True),
            ("has_flushdraw", True),
            ("players", ("blak_douglas", "flettl2")),
        ],
    )
    def test_flop_attributes(self, hand, attribute, expected_value):
        """Each flop attribute (actions, cards, texture) matches expectations."""
        assert getattr(hand.flop, attribute) == expected_value
    def test_flop(self, hand):
        """The parsed hand exposes its flop as a ``_Street`` instance."""
        assert isinstance(hand.flop, _Street)
    @pytest.mark.xfail
    def test_flop_pot(self, hand):
        """Flop pot calculation is not implemented yet (expected failure)."""
        assert hand.flop.pot == Decimal(800)
class TestClassRepresentation:
    """Tests for the textual representation of a parsed hand header."""
    hand_text = stars_hands.HAND1
    def test_unicode(self, hand_header):
        # Legacy py2-era twin of test_str; both exercise __str__ today.
        expected = "<PokerStarsHandHistory: #105024000105>"
        assert str(hand_header) == expected
    def test_str(self, hand_header):
        expected = "<PokerStarsHandHistory: #105024000105>"
        assert str(hand_header) == expected
class TestPlayerNameWithDot:
    """Player names containing dots and '$' must survive parsing intact."""
    hand_text = stars_hands.HAND5
    def test_player_is_in_player_list(self, hand):
        names = [player.name for player in hand.players]
        assert ".prestige.U$" in names
    def test_player_stack(self, hand):
        names = [player.name for player in hand.players]
        idx = names.index(".prestige.U$")
        assert hand.players[idx].stack == 3000
| mit |
Akrog/sqlalchemy | lib/sqlalchemy/dialects/mysql/pymysql.py | 59 | 1504 | # mysql/pymysql.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+pymysql
:name: PyMySQL
:dbapi: pymysql
:connectstring: mysql+pymysql://<username>:<password>@<host>/<dbname>\
[?<options>]
:url: http://www.pymysql.org/
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
MySQL-Python Compatibility
--------------------------
The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver,
and targets 100% compatibility. Most behavioral notes for MySQL-python apply
to the pymysql driver as well.
"""
from .mysqldb import MySQLDialect_mysqldb
from ...util import py3k
class MySQLDialect_pymysql(MySQLDialect_mysqldb):
    # PyMySQL is a pure-Python drop-in replacement for MySQLdb, so this
    # dialect only overrides driver identification, unicode behavior, and
    # (on py3k) error-code extraction; everything else comes from the base.
    driver = 'pymysql'
    description_encoding = None
    # generally, these two values should be both True
    # or both False. PyMySQL unicode tests pass all the way back
    # to 0.4 either way. See [ticket:3337]
    supports_unicode_statements = True
    supports_unicode_binds = True
    @classmethod
    def dbapi(cls):
        """Import and return the ``pymysql`` DBAPI module."""
        return __import__('pymysql')
    if py3k:
        # Under Python 3, pymysql may wrap the originating error object in
        # args[0]; unwrap one level before reading the numeric error code.
        def _extract_error_code(self, exception):
            if isinstance(exception.args[0], Exception):
                exception = exception.args[0]
            return exception.args[0]
# Module-level hook used by SQLAlchemy's dialect registry for "mysql+pymysql".
dialect = MySQLDialect_pymysql
| mit |
sarvex/tensorflow | tensorflow/python/keras/layers/preprocessing/benchmarks/normalization_adapt_benchmark.py | 6 | 4696 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Keras text vectorization preprocessing layer's adapt method."""
import time
import numpy as np
from tensorflow.python import keras
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.keras.layers.preprocessing import normalization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
v2_compat.enable_v2_behavior()
def reduce_fn(state, values):
  """tf.data.Dataset-friendly implementation of mean and variance.

  Implements the shifted-data ("computational") formula: with shift K,
  Ex = sum(x - K) and Ex2 = sum((x - K)**2) accumulated over ALL batches,
  so the caller can recover mean = K + Ex/n and
  var = (Ex2 - Ex**2 / n) / (n - 1).

  Args:
    state: tuple ``(k, n, ex, ex2)`` carried between batches — the shift
      value, element count, and the two shifted running sums.
    values: a batch of values; reduced over axis 0. Assumed float32-like —
      TODO confirm against the dataset built in the benchmark.

  Returns:
    The updated ``(k, n, ex, ex2)`` tuple.
  """
  k, n, ex, ex2 = state
  # If this is the first iteration, we pick the first value to be 'k',
  # which helps with precision - we assume that k is close to an average
  # value and calculate mean and variance with respect to that.
  k = control_flow_ops.cond(math_ops.equal(n, 0), lambda: values[0], lambda: k)
  sum_v = math_ops.reduce_sum(values, axis=0)
  sum_v2 = math_ops.reduce_sum(math_ops.square(values), axis=0)
  ones = array_ops.ones_like(values, dtype=dtypes.int32)
  batch_size = math_ops.reduce_sum(ones, axis=0)
  batch_size_f = math_ops.cast(batch_size, dtypes.float32)
  # Accumulate (rather than overwrite) the shifted sums across batches: the
  # previous code reset ex/ex2 on every batch ("0 + ...") and multiplied the
  # cross term -2*k*sum(x) by the batch size, which made the resulting
  # variance wrong whenever the dataset spanned more than one batch.
  ex = ex + sum_v - math_ops.multiply(batch_size_f, k)
  ex2 = (
      ex2 + sum_v2 -
      math_ops.multiply(math_ops.multiply(2.0, k), sum_v) +
      math_ops.multiply(batch_size_f, math_ops.square(k)))
  return (k, n + batch_size, ex, ex2)
class BenchmarkAdapt(benchmark.TensorFlowBenchmark):
  """Benchmark adapt."""
  def run_dataset_implementation(self, num_elements, batch_size):
    """Time a hand-rolled tf.data mean/variance pass as the baseline."""
    input_t = keras.Input(shape=(1,))
    layer = normalization.Normalization()
    _ = layer(input_t)
    num_repeats = 5
    starts = []
    ends = []
    for _ in range(num_repeats):
      # Rebuild the dataset each repeat so pipeline setup cost is included
      # consistently in every timed run.
      ds = dataset_ops.Dataset.range(num_elements)
      ds = ds.map(
          lambda x: array_ops.expand_dims(math_ops.cast(x, dtypes.float32), -1))
      ds = ds.batch(batch_size)
      starts.append(time.time())
      # Benchmarked code begins here.
      k, n, ex, ex2 = ds.reduce((0.0, 0, 0.0, 0.0), reduce_fn)
      mean = k.numpy() + ex.numpy() / n.numpy()
      # Sample variance via the shifted-sums identity (divide by n - 1).
      var = (ex2.numpy() - (ex.numpy() * ex.numpy()) / n.numpy()) / (
          n.numpy() - 1)
      layer.set_weights([mean, var])
      # Benchmarked code ends here.
      ends.append(time.time())
    avg_time = np.mean(np.array(ends) - np.array(starts))
    return avg_time
  def bm_adapt_implementation(self, num_elements, batch_size):
    """Test the KPL adapt implementation."""
    input_t = keras.Input(shape=(1,), dtype=dtypes.float32)
    layer = normalization.Normalization()
    _ = layer(input_t)
    num_repeats = 5
    starts = []
    ends = []
    for _ in range(num_repeats):
      ds = dataset_ops.Dataset.range(num_elements)
      ds = ds.map(
          lambda x: array_ops.expand_dims(math_ops.cast(x, dtypes.float32), -1))
      ds = ds.batch(batch_size)
      starts.append(time.time())
      # Benchmarked code begins here.
      layer.adapt(ds)
      # Benchmarked code ends here.
      ends.append(time.time())
    avg_time = np.mean(np.array(ends) - np.array(starts))
    name = "normalization_adapt|%s_elements|batch_%s" % (num_elements,
                                                         batch_size)
    # Report the KPL time alongside the tf.data baseline and their delta.
    baseline = self.run_dataset_implementation(num_elements, batch_size)
    extras = {
        "tf.data implementation baseline": baseline,
        "delta seconds": (baseline - avg_time),
        "delta percent": ((baseline - avg_time) / baseline) * 100
    }
    self.report_benchmark(
        iters=num_repeats, wall_time=avg_time, extras=extras, name=name)
  def benchmark_vocab_size_by_batch(self):
    """Sweep dataset size x batch size combinations."""
    for vocab_size in [100, 1000, 10000, 100000, 1000000]:
      for batch in [1, 16, 2048]:
        self.bm_adapt_implementation(vocab_size, batch)
# Run the benchmarks when this module is executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
mKeRix/home-assistant | tests/components/smartthings/test_init.py | 6 | 18804 | """Tests for the SmartThings component init module."""
from uuid import uuid4
from aiohttp import ClientConnectionError, ClientResponseError
from pysmartthings import InstalledAppStatus, OAuthToken
import pytest
from homeassistant.components import cloud, smartthings
from homeassistant.components.smartthings.const import (
CONF_CLOUDHOOK_URL,
CONF_INSTALLED_APP_ID,
CONF_REFRESH_TOKEN,
DATA_BROKERS,
DOMAIN,
EVENT_BUTTON,
SIGNAL_SMARTTHINGS_UPDATE,
SUPPORTED_PLATFORMS,
)
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import HTTP_FORBIDDEN, HTTP_INTERNAL_SERVER_ERROR
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, patch
from tests.common import MockConfigEntry
async def test_migration_creates_new_flow(hass, smartthings_mock, config_entry):
    """Test migration deletes app and creates new flow."""
    assert await async_setup_component(hass, "persistent_notification", {})
    # Force the pre-2 entry version so the migration path runs.
    config_entry.version = 1
    config_entry.add_to_hass(hass)
    await smartthings.async_migrate_entry(hass, config_entry)
    await hass.async_block_till_done()
    # Migration removes the remote app artifacts and the local entry...
    assert smartthings_mock.delete_installed_app.call_count == 1
    assert smartthings_mock.delete_app.call_count == 1
    assert not hass.config_entries.async_entries(DOMAIN)
    # ...then starts a fresh import flow for re-setup.
    flows = hass.config_entries.flow.async_progress()
    assert len(flows) == 1
    assert flows[0]["handler"] == "smartthings"
    assert flows[0]["context"] == {"source": "import"}
async def test_unrecoverable_api_errors_create_new_flow(
    hass, config_entry, smartthings_mock
):
    """
    Test a new config flow is initiated when there are API errors.
    401 (unauthorized): Occurs when the access token is no longer valid.
    403 (forbidden/not found): Occurs when the app or installed app could
    not be retrieved/found (likely deleted?)
    """
    assert await async_setup_component(hass, "persistent_notification", {})
    config_entry.add_to_hass(hass)
    request_info = Mock(real_url="http://example.com")
    smartthings_mock.app.side_effect = ClientResponseError(
        request_info=request_info, history=None, status=401
    )
    # Assert setup returns false
    result = await smartthings.async_setup_entry(hass, config_entry)
    assert not result
    # Assert entry was removed and new flow created
    await hass.async_block_till_done()
    assert not hass.config_entries.async_entries(DOMAIN)
    flows = hass.config_entries.flow.async_progress()
    assert len(flows) == 1
    assert flows[0]["handler"] == "smartthings"
    assert flows[0]["context"] == {"source": "import"}
    hass.config_entries.flow.async_abort(flows[0]["flow_id"])
async def test_recoverable_api_errors_raise_not_ready(
    hass, config_entry, smartthings_mock
):
    """Test config entry not ready raised for recoverable API errors."""
    config_entry.add_to_hass(hass)
    request_info = Mock(real_url="http://example.com")
    # A 500 from the app lookup is transient; setup should signal retry.
    smartthings_mock.app.side_effect = ClientResponseError(
        request_info=request_info, history=None, status=HTTP_INTERNAL_SERVER_ERROR
    )
    with pytest.raises(ConfigEntryNotReady):
        await smartthings.async_setup_entry(hass, config_entry)
async def test_scenes_api_errors_raise_not_ready(
    hass, config_entry, app, installed_app, smartthings_mock
):
    """Test config entry not ready raised when the scenes fetch returns 500."""
    config_entry.add_to_hass(hass)
    request_info = Mock(real_url="http://example.com")
    smartthings_mock.app.return_value = app
    smartthings_mock.installed_app.return_value = installed_app
    smartthings_mock.scenes.side_effect = ClientResponseError(
        request_info=request_info, history=None, status=HTTP_INTERNAL_SERVER_ERROR
    )
    with pytest.raises(ConfigEntryNotReady):
        await smartthings.async_setup_entry(hass, config_entry)
async def test_connection_errors_raise_not_ready(hass, config_entry, smartthings_mock):
    """Test config entry not ready raised for connection errors."""
    config_entry.add_to_hass(hass)
    smartthings_mock.app.side_effect = ClientConnectionError()
    with pytest.raises(ConfigEntryNotReady):
        await smartthings.async_setup_entry(hass, config_entry)
async def test_base_url_no_longer_https_does_not_load(
    hass, config_entry, app, smartthings_mock
):
    """Test base_url no longer valid creates a new flow."""
    # SmartThings webhooks require https; an http external URL aborts setup.
    await async_process_ha_core_config(
        hass, {"external_url": "http://example.local:8123"},
    )
    config_entry.add_to_hass(hass)
    smartthings_mock.app.return_value = app
    # Assert setup returns false
    result = await smartthings.async_setup_entry(hass, config_entry)
    assert not result
async def test_unauthorized_installed_app_raises_not_ready(
    hass, config_entry, app, installed_app, smartthings_mock
):
    """Test config entry not ready raised when the app isn't authorized."""
    config_entry.add_to_hass(hass)
    installed_app.installed_app_status = InstalledAppStatus.PENDING
    smartthings_mock.app.return_value = app
    smartthings_mock.installed_app.return_value = installed_app
    with pytest.raises(ConfigEntryNotReady):
        await smartthings.async_setup_entry(hass, config_entry)
async def test_scenes_unauthorized_loads_platforms(
    hass,
    config_entry,
    app,
    installed_app,
    device,
    smartthings_mock,
    subscription_factory,
):
    """Test if scenes are unauthorized we continue to load platforms."""
    config_entry.add_to_hass(hass)
    request_info = Mock(real_url="http://example.com")
    smartthings_mock.app.return_value = app
    smartthings_mock.installed_app.return_value = installed_app
    smartthings_mock.devices.return_value = [device]
    # 403 on scenes is treated as "no scenes permission", not a setup failure.
    smartthings_mock.scenes.side_effect = ClientResponseError(
        request_info=request_info, history=None, status=HTTP_FORBIDDEN
    )
    mock_token = Mock()
    mock_token.access_token = str(uuid4())
    mock_token.refresh_token = str(uuid4())
    smartthings_mock.generate_tokens.return_value = mock_token
    subscriptions = [
        subscription_factory(capability) for capability in device.capabilities
    ]
    smartthings_mock.subscriptions.return_value = subscriptions
    with patch.object(hass.config_entries, "async_forward_entry_setup") as forward_mock:
        assert await smartthings.async_setup_entry(hass, config_entry)
        # Assert platforms loaded
        await hass.async_block_till_done()
        assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)
async def test_config_entry_loads_platforms(
    hass,
    config_entry,
    app,
    installed_app,
    device,
    smartthings_mock,
    subscription_factory,
    scene,
):
    """Test config entry loads properly and proxies to platforms."""
    config_entry.add_to_hass(hass)
    smartthings_mock.app.return_value = app
    smartthings_mock.installed_app.return_value = installed_app
    smartthings_mock.devices.return_value = [device]
    smartthings_mock.scenes.return_value = [scene]
    mock_token = Mock()
    mock_token.access_token = str(uuid4())
    mock_token.refresh_token = str(uuid4())
    smartthings_mock.generate_tokens.return_value = mock_token
    subscriptions = [
        subscription_factory(capability) for capability in device.capabilities
    ]
    smartthings_mock.subscriptions.return_value = subscriptions
    with patch.object(hass.config_entries, "async_forward_entry_setup") as forward_mock:
        assert await smartthings.async_setup_entry(hass, config_entry)
        # Assert platforms loaded
        await hass.async_block_till_done()
        assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)
async def test_config_entry_loads_unconnected_cloud(
    hass,
    config_entry,
    app,
    installed_app,
    device,
    smartthings_mock,
    subscription_factory,
    scene,
):
    """Test entry loads during startup when cloud isn't connected."""
    config_entry.add_to_hass(hass)
    # Presence of a stored cloudhook URL exercises the cloud-not-ready path.
    hass.data[DOMAIN][CONF_CLOUDHOOK_URL] = "https://test.cloud"
    smartthings_mock.app.return_value = app
    smartthings_mock.installed_app.return_value = installed_app
    smartthings_mock.devices.return_value = [device]
    smartthings_mock.scenes.return_value = [scene]
    mock_token = Mock()
    mock_token.access_token = str(uuid4())
    mock_token.refresh_token = str(uuid4())
    smartthings_mock.generate_tokens.return_value = mock_token
    subscriptions = [
        subscription_factory(capability) for capability in device.capabilities
    ]
    smartthings_mock.subscriptions.return_value = subscriptions
    with patch.object(hass.config_entries, "async_forward_entry_setup") as forward_mock:
        assert await smartthings.async_setup_entry(hass, config_entry)
        await hass.async_block_till_done()
        assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)
async def test_unload_entry(hass, config_entry):
    """Test entries are unloaded correctly."""
    connect_disconnect = Mock()
    smart_app = Mock()
    # connect_event returns the disposer the broker must call on disconnect.
    smart_app.connect_event.return_value = connect_disconnect
    broker = smartthings.DeviceBroker(hass, config_entry, Mock(), smart_app, [], [])
    broker.connect()
    hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id] = broker
    with patch.object(
        hass.config_entries, "async_forward_entry_unload", return_value=True
    ) as forward_mock:
        assert await smartthings.async_unload_entry(hass, config_entry)
        assert connect_disconnect.call_count == 1
        assert config_entry.entry_id not in hass.data[DOMAIN][DATA_BROKERS]
        # Assert platforms unloaded
        await hass.async_block_till_done()
        assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)
async def test_remove_entry(hass, config_entry, smartthings_mock):
    """Test that the installed app and app are removed up."""
    # Act
    await smartthings.async_remove_entry(hass, config_entry)
    # Assert
    assert smartthings_mock.delete_installed_app.call_count == 1
    assert smartthings_mock.delete_app.call_count == 1
async def test_remove_entry_cloudhook(hass, config_entry, smartthings_mock):
    """Test that the installed app, app, and cloudhook are removed up."""
    hass.config.components.add("cloud")
    # Arrange
    config_entry.add_to_hass(hass)
    hass.data[DOMAIN][CONF_CLOUDHOOK_URL] = "https://test.cloud"
    # Act
    with patch.object(
        cloud, "async_is_logged_in", return_value=True
    ) as mock_async_is_logged_in, patch.object(
        cloud, "async_delete_cloudhook"
    ) as mock_async_delete_cloudhook:
        await smartthings.async_remove_entry(hass, config_entry)
    # Assert
    assert smartthings_mock.delete_installed_app.call_count == 1
    assert smartthings_mock.delete_app.call_count == 1
    assert mock_async_is_logged_in.call_count == 1
    assert mock_async_delete_cloudhook.call_count == 1
async def test_remove_entry_app_in_use(hass, config_entry, smartthings_mock):
    """Test app is not removed if in use by another config entry."""
    # Arrange
    config_entry.add_to_hass(hass)
    # A second entry sharing the app id keeps the SmartApp alive.
    data = config_entry.data.copy()
    data[CONF_INSTALLED_APP_ID] = str(uuid4())
    entry2 = MockConfigEntry(version=2, domain=DOMAIN, data=data)
    entry2.add_to_hass(hass)
    # Act
    await smartthings.async_remove_entry(hass, config_entry)
    # Assert
    assert smartthings_mock.delete_installed_app.call_count == 1
    assert smartthings_mock.delete_app.call_count == 0
async def test_remove_entry_already_deleted(hass, config_entry, smartthings_mock):
    """Test handles when the apps have already been removed."""
    request_info = Mock(real_url="http://example.com")
    # Arrange: 403s mean the remote artifacts are already gone; removal
    # should swallow them instead of failing.
    smartthings_mock.delete_installed_app.side_effect = ClientResponseError(
        request_info=request_info, history=None, status=HTTP_FORBIDDEN
    )
    smartthings_mock.delete_app.side_effect = ClientResponseError(
        request_info=request_info, history=None, status=HTTP_FORBIDDEN
    )
    # Act
    await smartthings.async_remove_entry(hass, config_entry)
    # Assert
    assert smartthings_mock.delete_installed_app.call_count == 1
    assert smartthings_mock.delete_app.call_count == 1
async def test_remove_entry_installedapp_api_error(
    hass, config_entry, smartthings_mock
):
    """Test raises exceptions removing the installed app."""
    request_info = Mock(real_url="http://example.com")
    # Arrange
    smartthings_mock.delete_installed_app.side_effect = ClientResponseError(
        request_info=request_info, history=None, status=HTTP_INTERNAL_SERVER_ERROR
    )
    # Act
    with pytest.raises(ClientResponseError):
        await smartthings.async_remove_entry(hass, config_entry)
    # Assert: failure on the installed app short-circuits app deletion.
    assert smartthings_mock.delete_installed_app.call_count == 1
    assert smartthings_mock.delete_app.call_count == 0
async def test_remove_entry_installedapp_unknown_error(
    hass, config_entry, smartthings_mock
):
    """Test raises exceptions removing the installed app."""
    # Arrange
    smartthings_mock.delete_installed_app.side_effect = Exception
    # Act
    with pytest.raises(Exception):
        await smartthings.async_remove_entry(hass, config_entry)
    # Assert
    assert smartthings_mock.delete_installed_app.call_count == 1
    assert smartthings_mock.delete_app.call_count == 0
async def test_remove_entry_app_api_error(hass, config_entry, smartthings_mock):
    """Test raises exceptions removing the app."""
    # Arrange
    request_info = Mock(real_url="http://example.com")
    smartthings_mock.delete_app.side_effect = ClientResponseError(
        request_info=request_info, history=None, status=HTTP_INTERNAL_SERVER_ERROR
    )
    # Act
    with pytest.raises(ClientResponseError):
        await smartthings.async_remove_entry(hass, config_entry)
    # Assert
    assert smartthings_mock.delete_installed_app.call_count == 1
    assert smartthings_mock.delete_app.call_count == 1
async def test_remove_entry_app_unknown_error(hass, config_entry, smartthings_mock):
    """Test raises exceptions removing the app."""
    # Arrange
    smartthings_mock.delete_app.side_effect = Exception
    # Act
    with pytest.raises(Exception):
        await smartthings.async_remove_entry(hass, config_entry)
    # Assert
    assert smartthings_mock.delete_installed_app.call_count == 1
    assert smartthings_mock.delete_app.call_count == 1
async def test_broker_regenerates_token(hass, config_entry):
    """Test the device broker regenerates the refresh token."""
    token = Mock(OAuthToken)
    token.refresh_token = str(uuid4())
    stored_action = None
    # Capture the periodic callback the broker registers so the test can
    # invoke it directly instead of waiting on the timer.
    def async_track_time_interval(hass, action, interval):
        nonlocal stored_action
        stored_action = action
    with patch(
        "homeassistant.components.smartthings.async_track_time_interval",
        new=async_track_time_interval,
    ):
        broker = smartthings.DeviceBroker(hass, config_entry, token, Mock(), [], [])
        broker.connect()
    assert stored_action
    await stored_action(None)  # pylint:disable=not-callable
    # The refreshed token must be persisted back onto the config entry.
    assert token.refresh.call_count == 1
    assert config_entry.data[CONF_REFRESH_TOKEN] == token.refresh_token
async def test_event_handler_dispatches_updated_devices(
    hass, config_entry, device_factory, event_request_factory, event_factory
):
    """Test the event handler dispatches updated devices."""
    devices = [
        device_factory("Bedroom 1 Switch", ["switch"]),
        device_factory("Bathroom 1", ["switch"]),
        device_factory("Sensor", ["motionSensor"]),
        device_factory("Lock", ["lock"]),
    ]
    device_ids = [
        devices[0].device_id,
        devices[1].device_id,
        devices[2].device_id,
        devices[3].device_id,
    ]
    event = event_factory(
        devices[3].device_id,
        capability="lock",
        attribute="lock",
        value="locked",
        data={"codeId": "1"},
    )
    request = event_request_factory(device_ids=device_ids, events=[event])
    # The broker only processes requests for its own installed app id.
    config_entry.data = {
        **config_entry.data,
        CONF_INSTALLED_APP_ID: request.installed_app_id,
    }
    called = False
    def signal(ids):
        nonlocal called
        called = True
        assert device_ids == ids
    async_dispatcher_connect(hass, SIGNAL_SMARTTHINGS_UPDATE, signal)
    broker = smartthings.DeviceBroker(hass, config_entry, Mock(), Mock(), devices, [])
    broker.connect()
    # pylint:disable=protected-access
    await broker._event_handler(request, None, None)
    await hass.async_block_till_done()
    assert called
    # Device statuses must reflect the applied event payloads.
    for device in devices:
        assert device.status.values["Updated"] == "Value"
    assert devices[3].status.attributes["lock"].value == "locked"
    assert devices[3].status.attributes["lock"].data == {"codeId": "1"}
async def test_event_handler_ignores_other_installed_app(
    hass, config_entry, device_factory, event_request_factory
):
    """Test the event handler ignores requests for other installed apps."""
    device = device_factory("Bedroom 1 Switch", ["switch"])
    # Note: the entry keeps its default installed app id, so this request's
    # generated id will not match and must be dropped.
    request = event_request_factory([device.device_id])
    called = False
    def signal(ids):
        nonlocal called
        called = True
    async_dispatcher_connect(hass, SIGNAL_SMARTTHINGS_UPDATE, signal)
    broker = smartthings.DeviceBroker(hass, config_entry, Mock(), Mock(), [device], [])
    broker.connect()
    # pylint:disable=protected-access
    await broker._event_handler(request, None, None)
    await hass.async_block_till_done()
    assert not called
async def test_event_handler_fires_button_events(
    hass, config_entry, device_factory, event_factory, event_request_factory
):
    """Test the event handler fires button events."""
    device = device_factory("Button 1", ["button"])
    event = event_factory(
        device.device_id, capability="button", attribute="button", value="pushed"
    )
    request = event_request_factory(events=[event])
    config_entry.data = {
        **config_entry.data,
        CONF_INSTALLED_APP_ID: request.installed_app_id,
    }
    called = False
    def handler(evt):
        nonlocal called
        called = True
        assert evt.data == {
            "component_id": "main",
            "device_id": device.device_id,
            "location_id": event.location_id,
            "value": "pushed",
            "name": device.label,
            "data": None,
        }
    hass.bus.async_listen(EVENT_BUTTON, handler)
    broker = smartthings.DeviceBroker(hass, config_entry, Mock(), Mock(), [device], [])
    broker.connect()
    # pylint:disable=protected-access
    await broker._event_handler(request, None, None)
    await hass.async_block_till_done()
    assert called
| mit |
huaweiswitch/neutron | neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_nexus_db.py | 18 | 8656 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import testtools
from neutron.plugins.ml2.drivers.cisco.nexus import exceptions
from neutron.plugins.ml2.drivers.cisco.nexus import nexus_db_v2
from neutron.tests.unit import testlib_api
class CiscoNexusDbTest(testlib_api.SqlTestCase):
"""Unit tests for Cisco mechanism driver's Nexus port binding database."""
NpbObj = collections.namedtuple('NpbObj', 'port vlan switch instance')
def _npb_test_obj(self, pnum, vnum, switch='10.9.8.7', instance=None):
"""Creates a Nexus port binding test object from a pair of numbers."""
if pnum is 'router':
port = pnum
else:
port = '1/%s' % pnum
if instance is None:
instance = 'instance_%s_%s' % (pnum, vnum)
return self.NpbObj(port, vnum, switch, instance)
def _assert_bindings_match(self, npb, npb_obj):
"""Asserts that a port binding matches a port binding test obj."""
self.assertEqual(npb.port_id, npb_obj.port)
self.assertEqual(npb.vlan_id, npb_obj.vlan)
self.assertEqual(npb.switch_ip, npb_obj.switch)
self.assertEqual(npb.instance_id, npb_obj.instance)
    # Thin wrappers around nexus_db_v2 so the tests below read in terms of
    # NpbObj test tuples rather than positional DB-call arguments.
    def _add_binding_to_db(self, npb):
        """Adds a port binding to the Nexus database."""
        return nexus_db_v2.add_nexusport_binding(
            npb.port, npb.vlan, npb.switch, npb.instance)
    def _add_bindings_to_db(self, npbs):
        """Adds a list of port bindings to the Nexus database."""
        for npb in npbs:
            nexus_db_v2.add_nexusport_binding(
                npb.port, npb.vlan, npb.switch, npb.instance)
    def _remove_binding_from_db(self, npb):
        """Removes a port binding from the Nexus database."""
        return nexus_db_v2.remove_nexusport_binding(
            npb.port, npb.vlan, npb.switch, npb.instance)
    def _get_nexusport_binding(self, npb):
        """Gets a port binding based on port, vlan, switch, and instance."""
        return nexus_db_v2.get_nexusport_binding(
            npb.port, npb.vlan, npb.switch, npb.instance)
    def _get_nexusvlan_binding(self, npb):
        """Gets port bindings based on vlan and switch."""
        return nexus_db_v2.get_nexusvlan_binding(npb.vlan, npb.switch)
    def _get_nexusvm_binding(self, npb):
        """Gets port binding based on vlan and instance."""
        # Only the first matching binding is returned.
        return nexus_db_v2.get_nexusvm_bindings(npb.vlan, npb.instance)[0]
    def _get_port_vlan_switch_binding(self, npb):
        """Gets port bindings based on port, vlan, and switch."""
        return nexus_db_v2.get_port_vlan_switch_binding(
            npb.port, npb.vlan, npb.switch)
    def _get_port_switch_bindings(self, npb):
        """Get port bindings based on port and switch."""
        return nexus_db_v2.get_port_switch_bindings(npb.port, npb.switch)
    def test_nexusportbinding_add_remove(self):
        """Tests add and removal of port bindings from the Nexus database."""
        npb11 = self._npb_test_obj(10, 100)
        npb = self._add_binding_to_db(npb11)
        self._assert_bindings_match(npb, npb11)
        # remove_nexusport_binding returns the list of deleted rows.
        npb = self._remove_binding_from_db(npb11)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb11)
        # Removing the same binding twice must raise, not silently no-op.
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            self._remove_binding_from_db(npb11)
    def test_nexusportbinding_get(self):
        """Tests get of specific port bindings from the database."""
        npb11 = self._npb_test_obj(10, 100)
        npb21 = self._npb_test_obj(20, 100)
        npb22 = self._npb_test_obj(20, 200)
        self._add_bindings_to_db([npb11, npb21, npb22])
        # Each exact (port, vlan, switch, instance) lookup yields one row.
        npb = self._get_nexusport_binding(npb11)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb11)
        npb = self._get_nexusport_binding(npb21)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb21)
        npb = self._get_nexusport_binding(npb22)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb22)
        # A lookup with an unknown instance id must raise, not return [].
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            nexus_db_v2.get_nexusport_binding(
                npb21.port, npb21.vlan, npb21.switch, "dummyInstance")
    def test_nexusvlanbinding_get(self):
        """Test get of port bindings based on vlan and switch."""
        npb11 = self._npb_test_obj(10, 100)
        npb21 = self._npb_test_obj(20, 100)
        npb22 = self._npb_test_obj(20, 200)
        self._add_bindings_to_db([npb11, npb21, npb22])
        # vlan 100 is shared by npb11 and npb21 on the default switch.
        npb_all_v100 = self._get_nexusvlan_binding(npb11)
        self.assertEqual(len(npb_all_v100), 2)
        npb_v200 = self._get_nexusvlan_binding(npb22)
        self.assertEqual(len(npb_v200), 1)
        self._assert_bindings_match(npb_v200[0], npb22)
        # An unknown switch must raise, not return an empty result.
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            nexus_db_v2.get_nexusvlan_binding(npb21.vlan, "dummySwitch")
    def test_nexusvmbinding_get(self):
        """Test get of port bindings based on vlan and instance."""
        npb11 = self._npb_test_obj(10, 100)
        npb21 = self._npb_test_obj(20, 100)
        npb22 = self._npb_test_obj(20, 200)
        self._add_bindings_to_db([npb11, npb21, npb22])
        npb = self._get_nexusvm_binding(npb21)
        self._assert_bindings_match(npb, npb21)
        npb = self._get_nexusvm_binding(npb22)
        self._assert_bindings_match(npb, npb22)
        # Unknown instance: the lookup itself must raise before indexing.
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            nexus_db_v2.get_nexusvm_bindings(npb21.vlan, "dummyInstance")[0]
    def test_nexusportvlanswitchbinding_get(self):
        """Tests get of port bindings based on port, vlan, and switch."""
        npb11 = self._npb_test_obj(10, 100)
        npb21 = self._npb_test_obj(20, 100)
        self._add_bindings_to_db([npb11, npb21])
        npb = self._get_port_vlan_switch_binding(npb11)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb11)
        # An unknown switch must raise NexusPortBindingNotFound.
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            nexus_db_v2.get_port_vlan_switch_binding(
                npb21.port, npb21.vlan, "dummySwitch")
    def test_nexusportswitchbinding_get(self):
        """Tests get of port bindings based on port and switch."""
        npb11 = self._npb_test_obj(10, 100)
        npb21 = self._npb_test_obj(20, 100, switch='2.2.2.2')
        npb22 = self._npb_test_obj(20, 200, switch='2.2.2.2')
        self._add_bindings_to_db([npb11, npb21, npb22])
        npb = self._get_port_switch_bindings(npb11)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb11)
        # Port 1/20 exists twice on switch 2.2.2.2 (vlans 100 and 200).
        npb_all_p20 = self._get_port_switch_bindings(npb21)
        self.assertEqual(len(npb_all_p20), 2)
        # NOTE(review): unlike the other getters, an unknown switch here
        # yields None rather than raising -- confirm this asymmetry is
        # intended by the nexus_db_v2 API.
        npb = nexus_db_v2.get_port_switch_bindings(npb21.port, "dummySwitch")
        self.assertIsNone(npb)
    def test_nexusbinding_update(self):
        """Tests update of vlan IDs for port bindings."""
        npb11 = self._npb_test_obj(10, 100, switch='1.1.1.1', instance='test')
        npb21 = self._npb_test_obj(20, 100, switch='1.1.1.1', instance='test')
        self._add_bindings_to_db([npb11, npb21])
        npb_all_v100 = nexus_db_v2.get_nexusvlan_binding(100, '1.1.1.1')
        self.assertEqual(len(npb_all_v100), 2)
        # Moving port 1/20 from vlan 100 to 200 returns the updated row.
        npb22 = self._npb_test_obj(20, 200, switch='1.1.1.1', instance='test')
        npb = nexus_db_v2.update_nexusport_binding(npb21.port, 200)
        self._assert_bindings_match(npb, npb22)
        npb_all_v100 = nexus_db_v2.get_nexusvlan_binding(100, '1.1.1.1')
        self.assertEqual(len(npb_all_v100), 1)
        self._assert_bindings_match(npb_all_v100[0], npb11)
        # Updating to vlan 0 is treated as a no-op and returns None.
        npb = nexus_db_v2.update_nexusport_binding(npb21.port, 0)
        self.assertIsNone(npb)
        # Updating a port that has no binding must raise.
        npb33 = self._npb_test_obj(30, 300, switch='1.1.1.1', instance='test')
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            nexus_db_v2.update_nexusport_binding(npb33.port, 200)
| apache-2.0 |
MinimalOS-AOSP/kernel_lge_hammerhead | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0       # -q: suppress progress() output
test = 0        # -t: syntax-check mode; print sysfs paths instead of using them
comments = 0    # -c: echo test-file comments after the first command
# sysfs locations of the kernel rt-mutex tester threads; the per-thread id
# is inserted between the prefix and the status/command suffix.
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
# Symbolic command names from the test file, mapped to the numeric opcodes
# written to the tester's command file.
cmd_opcodes = {
    "schedother"    : "1",
    "schedfifo"     : "2",
    "lock"          : "3",
    "locknowait"    : "4",
    "lockint"       : "5",
    "lockintnowait" : "6",
    "lockcont"      : "7",
    "unlock"        : "8",
    "signal"        : "11",
    "resetevent"    : "98",
    "reset"         : "99",
    }
# Test opcodes: [status-field letter, relation, fixed argument or None].
# A None argument means the value comes from the test file's data column.
test_opcodes = {
    "prioeq"        : ["P" , "eq" , None],
    "priolt"        : ["P" , "lt" , None],
    "priogt"        : ["P" , "gt" , None],
    "nprioeq"       : ["N" , "eq" , None],
    "npriolt"       : ["N" , "lt" , None],
    "npriogt"       : ["N" , "gt" , None],
    "unlocked"      : ["M" , "eq" , 0],
    "trylock"       : ["M" , "eq" , 1],
    "blocked"       : ["M" , "eq" , 2],
    "blockedwake"   : ["M" , "eq" , 3],
    "locked"        : ["M" , "eq" , 4],
    "opcodeeq"      : ["O" , "eq" , None],
    "opcodelt"      : ["O" , "lt" , None],
    "opcodegt"      : ["O" , "gt" , None],
    "eventeq"       : ["E" , "eq" , None],
    "eventlt"       : ["E" , "lt" , None],
    "eventgt"       : ["E" , "gt" , None],
    }
# Print usage information
def usage():
    """Print command line help (Python 2 print statements)."""
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c    display comments after first command"
    print " -h    help"
    print " -q    quiet mode"
    print " -t    test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    # NOTE: the parameter name shadows the builtin str(); kept as-is for
    # byte-compatibility with the in-tree kernel script.
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Check one status field against a test opcode.

    val -- raw status value read from sysfs (string of digits)
    top -- [field-letter, relation, fixed-arg] triple from test_opcodes
    arg -- the data column of the current test line
    Returns 1 when the relation holds, 0 otherwise.
    """
    intval = int(val)
    if top[0] == "M":
        # Mutex state is digit number `arg` (counted from the right) of the
        # packed value.  Use explicit floor division: on Python 2 "/" on
        # ints already floors, "//" states the intent and stays correct
        # if the script is ever run under Python 3.
        intval = intval // (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode field: the argument may be a symbolic command name or a
        # raw opcode number.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)
    # progress("%d %s %d" %(intval, top[1], argval))
    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)
# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)
# Select the input source: a test file argument, or stdin.
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin
linenr = 0
# Read the test patterns
# Each non-comment line has 4 colon-separated fields:
#   command : opcode : thread-id : data
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        # Comment line; echo it only at level 2 (-c after first command).
        if comments > 1:
            progress(line)
        continue
    if comments == 1:
        comments = 2
        progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value
        # "t" checks the status once; "w" polls until the condition holds.
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break
                progress(" " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        # Any lookup/IO failure on this line is reported as a syntax error.
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
        sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
ser/topitup | siema.py | 1 | 8241 | # Flask modules
from flask import (
Blueprint,
render_template,
redirect,
url_for,
request,
flash,
current_app,
g,
)
# FLask Login
from flask_login import (
current_user,
)
# WTForms
from flask_wtf import Form
from wtforms import (
SubmitField,
BooleanField,
DecimalField,
)
from wtforms.validators import DataRequired
# Mail
from flask_mail import Message
# Modules required for communication with pypayd
import requests
import json
# Other modules
from datetime import datetime
from datetime import timedelta
# Our own modules
from topitup import db
from frontend import login_required
from nav import (
nav,
top_nav
)
# Let's start!
class Payd(db.Model):
    """One pypayd invoice: owner, price, bitcoin address and payment state."""
    __bind_key__ = "topitup"
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer)  # id of the frontend user who ordered
    time_creation = db.Column(db.DateTime)
    # The Unix epoch (datetime.fromtimestamp(0)) is used elsewhere in this
    # module as the "not paid yet" sentinel for time_payment.
    time_payment = db.Column(db.DateTime)
    order_id = db.Column(db.String(35), unique=True)  # pypayd order id
    native_price = db.Column(db.Integer)
    native_currency = db.Column(db.String(3))  # ISO code, e.g. "EUR"
    btc_price = db.Column(db.Integer)
    address = db.Column(db.String(35))  # receiving bitcoin address
    txn = db.Column(db.Integer, default=0)  # confirmation count; 0 = unseen
    def __init__(self, id, user_id, time_creation, time_payment, order_id,
                 native_price, native_currency, btc_price, address, txn):
        self.id = id
        self.user_id = user_id
        self.time_creation = time_creation
        self.time_payment = time_payment
        self.order_id = order_id
        self.native_price = native_price
        self.native_currency = native_currency
        self.btc_price = btc_price
        self.address = address
        self.txn = txn
    def __repr__(self):
        return '<Payd %r>' % self.id
# create sqlite database if it does not exist
try:
    db.create_all(bind='topitup')
except Exception:
    # Tables usually exist already; ignore that.  A bare "except:" would
    # also swallow SystemExit/KeyboardInterrupt, so catch Exception only.
    pass
# Blueprint
siema = Blueprint('siema', __name__)
# Buy credits Form
# NOTE(review): despite the name, this form buys credits, not logs in --
# consider renaming it (e.g. BuyCreditsForm) when callers can be updated.
class LoginForm(Form):
    """Form for purchasing credits: an amount plus a TOC confirmation."""
    amount = DecimalField('Amount of Credits', validators=[DataRequired()])
    confirm_me = BooleanField('Please confirm you agree to TOC',
                              validators=[DataRequired()])
    submit = SubmitField("Buy Credits")
@siema.before_request
def before_request():
    """Populate flask.g with the current user's identity and balance.

    Anonymous users lack the username/email/neuro attributes, so any
    failure simply leaves g.user and g.credits as None.
    """
    try:
        g.user = current_user.username.decode('utf-8')
        g.email = current_user.email.decode('utf-8')
        # amount of Credits in user's account
        g.credits = current_user.neuro
        g.user_id = current_user.id
    except Exception:
        # Narrowed from a bare "except:", which would also swallow
        # SystemExit/KeyboardInterrupt.
        g.user = None
        g.credits = None
    nav.register_element('top_nav', top_nav(g.user, g.credits))
# run every minute from cron to check for payments
@siema.route('/invoices/checkitup')
def checkitup():
    """Poll pypayd for every unpaid invoice and book confirmed payments.

    An invoice counts as unpaid while time_payment still holds the Unix
    epoch sentinel set at creation time.
    """
    # we collect all invoices which are not paid
    sql_query = Payd.query.filter_by(
        time_payment=datetime.fromtimestamp(0)).all()
    for invoice in sql_query:
        print(invoice)
        howold = current_app.config['WARRANTY_TIME']
        # ignore all invoices which are older than WARRANTY_TIME days
        if invoice.time_creation + timedelta(days=howold) > datetime.now():
            print(invoice.order_id)
            # initiate conversation with pypayd
            pypayd_headers = {'content-type': 'application/json'}
            pypayd_payload = {
                "method": "check_order_status",
                "params": {"order_id": invoice.order_id},
                "jsonrpc": "2.0",
                "id": 0,
            }
            # This JSON-RPC call must actually run: the code below reads
            # pypayd_response, which previously raised NameError because
            # the request had been commented out.
            pypayd_response = requests.post(
                current_app.config['PYPAYD_URI'],
                data=json.dumps(pypayd_payload),
                headers=pypayd_headers).json()
            print(pypayd_response)
            howmanyconfirmations = current_app.config['CONFIRMATIONS']
            # NOTE(review): this reads the 'amount' field but compares it
            # against a confirmation threshold -- it looks like it should
            # read a confirmations field; verify against the pypayd API.
            confirmations = pypayd_response['result']['amount']
            # Huhu! We have a new payment!
            if invoice.txn == 0 and confirmations > howmanyconfirmations:
                # Send an email message if payment was registered
                # From: DEFAULT_MAIL_SENDER
                # NOTE(review): msg is built but never handed to Flask-Mail,
                # and current_user is unlikely to be meaningful in a
                # cron-triggered request -- both need confirming.
                msg = Message()
                msg.add_recipient(current_user.email)
                msg.subject = "Payment confirmation"
                msg.body = ""
                # Register payment
                invoice.time_payment = datetime.now()
                # Register paid amount in the main database
                balance = current_user.credits
                current_user.credits = balance + pypayd_response['result']['amount']
                # Housekeeping
                invoice.txn = confirmations
    # register all transactions in databases
    db.session.commit()
    flash('Thank you.', 'info')
    return redirect(url_for('frontend.index'))
@siema.route('/invoices/id/<orderid>')
@login_required
def showinvoice(orderid):
    """Render a single invoice, looked up by its pypayd order id."""
    # NOTE(review): there is no ownership check here -- any logged-in user
    # who knows an order id can view that invoice; confirm this is intended.
    invoice = Payd.query.filter_by(order_id=orderid).first()
    return render_template('invoice-id.html', invoice=invoice)
@siema.route('/invoices/new', methods=('GET', 'POST'))
@login_required
def new():
    """Create a new invoice via pypayd and show payment instructions.

    GET renders the purchase form; a valid POST creates a pypayd order,
    stores the unpaid invoice locally and renders the pay-me page.
    """
    form = LoginForm()
    if form.validate_on_submit():
        amount = request.form['amount']
        confirm_me = False
        if 'confirm_me' in request.form:
            confirm_me = True
        # NOTE(review): this branch is a no-op -- the DataRequired
        # validator already enforces the checkbox; dead code to confirm.
        if confirm_me is False:
            pass
        # get a new transaction id
        sql_query = Payd.query.all()
        new_local_transaction_id = len(sql_query)
        # TODO: deal with an unlikely event of concurrency
        # initiate conversation with pypayd
        pypayd_headers = {'content-type': 'application/json'}
        pypayd_payload = {
            "method": "create_order",
            "params": {"amount": amount, "qr_code": True},
            "jsonrpc": "2.0",
            "id": new_local_transaction_id,
        }
        pypayd_response = requests.post(
            current_app.config['PYPAYD_URI'],
            data=json.dumps(pypayd_payload),
            headers=pypayd_headers).json()
        print(pypayd_response)
        # insert stuff into our transaction database
        to_db = Payd(
            None,
            g.user_id,
            datetime.utcnow(),
            datetime.fromtimestamp(0),  # this is not a paid invoice, yet
            pypayd_response['result']['order_id'],
            amount,
            "EUR",
            pypayd_response['result']['amount'],
            pypayd_response['result']['receiving_address'],
            0,
        )
        db.session.add(to_db)
        db.session.commit()
        # Data handed to the template: price, address and QR code image.
        payme = {
            'credits': amount,
            'btc': pypayd_response['result']['amount'],
            'address': pypayd_response['result']['receiving_address'],
            'image': pypayd_response['result']['qr_image'],
        }
        # generate approximate time to pay the invoice
        pay_time = datetime.now() + timedelta(minutes=45)
        # and finally show an invoice to the customer
        return render_template('invoice-payme.html',
                               payme=payme,
                               pay_time=pay_time)
    return render_template('invoice-new.html', form=form)
# user has access to his own invoices only
@siema.route('/invoices/', defaults={'page': 1})
@siema.route('/invoices/page/<int:page>')
@login_required
def index(page):
    """Paginated list of the current user's own invoices."""
    per_page = current_app.config['INVOICES_PER_PAGE']
    own_invoices = Payd.query.filter_by(user_id=g.user_id)
    page_of_invoices = own_invoices.paginate(page, per_page)
    return render_template('invoices.html', invoices=page_of_invoices)
# admin has access to all invoices
@siema.route('/admin/', defaults={'page': 1})
@siema.route('/admin/page/<int:page>')
@login_required
def admin(page):
    """Paginated list of every invoice; restricted to the admin account."""
    # Only the hard-coded admin account (user id 666) may list everything;
    # everyone else is bounced back to their own invoice list.
    if g.user_id != 666:
        flash('You are not admin and you can see your own invoices only!',
              'warning')
        return redirect(url_for('siema.index'))
    all_invoices = Payd.query.paginate(page, 50)
    return render_template('invoices.html', invoices=all_invoices)
| agpl-3.0 |
mccarrmb/moztrap | tests/view/lists/test_finder.py | 5 | 10978 | """
Tests for finder.
"""
from django.template.response import TemplateResponse
from django.test import RequestFactory
from mock import Mock, patch
from tests import case
class FinderDecoratorTest(case.DBTestCase):
    """Tests for the finder view decorator."""
    @property
    def finder(self):
        """The decorator under test."""
        from moztrap.view.lists.decorators import finder
        return finder
    def on_response(self, response, decorator=None, request=None):
        """Apply given decorator to dummy view, return given response."""
        decorator = decorator or self.finder(Mock())
        request = request or RequestFactory().get("/")
        @decorator
        def view(request):
            return response
        return view(request)
    def on_template_response(self, context, **kwargs):
        """Run TemplateResponse with given context through decorated view."""
        request = kwargs.setdefault("request", RequestFactory().get("/"))
        res = TemplateResponse(request, "some/template.html", context)
        return self.on_response(res, **kwargs)
    def test_returns_non_template_response(self):
        """Returns a non-TemplateResponse unmodified, without error."""
        res = self.on_response("blah")
        self.assertEqual(res, "blah")
    def test_uses_wraps(self):
        """Preserves docstring and name of original view func."""
        @self.finder(Mock())
        def myview(request, some_id):
            """docstring"""
        self.assertEqual(myview.func_name, "myview")
        self.assertEqual(myview.func_doc, "docstring")
    def test_passes_on_args(self):
        """Arguments are passed on to original view func."""
        record = []
        @self.finder(Mock())
        def myview(request, *args, **kwargs):
            record.extend([args, kwargs])
        myview(RequestFactory().get("/"), "a", b=2)
        self.assertEqual(record, [("a",), {"b": 2}])
    @patch("moztrap.view.lists.finder.render")
    def test_ajax(self, render):
        """Ajax response is rendered column template."""
        render.return_value = "some HTML"
        MockFinder = Mock()
        f = MockFinder.return_value
        f.column_template.return_value = "some/finder/_column.html"
        f.objects.return_value = ["some", "objects"]
        # Ajax finder request: ?finder=1 plus column name and parent id.
        req = RequestFactory().get(
            "/some/url",
            {"finder": "1", "col": "things", "id": "2"},
            HTTP_X_REQUESTED_WITH="XMLHttpRequest")
        res = self.on_template_response(
            {}, request=req, decorator=self.finder(MockFinder))
        self.assertEqual(res, "some HTML")
        # The column template is rendered with the queried objects only.
        self.assertEqual(
            render.call_args[0][1:],
            (
                "some/finder/_column.html",
                {
                    "colname": "things",
                    "finder": {
                        "finder": f,
                        "things": ["some", "objects"]
                        }
                    }
                )
            )
        f.column_template.assert_called_with("things")
        f.objects.assert_called_with("things", "2")
    def test_no_ajax(self):
        """Non-ajax response has finder with top-column objects in context."""
        MockFinder = Mock()
        f = MockFinder.return_value
        top_col = Mock()
        top_col.name = "top"
        f.columns = [top_col]
        f.objects.return_value = ["some", "objects"]
        res = self.on_template_response({}, decorator=self.finder(MockFinder))
        self.assertIs(res.context_data["finder"]["finder"], f)
        self.assertEqual(
            res.context_data["finder"]["top"],
            ["some", "objects"]
            )
        f.objects.assert_called_with("top")
class FinderTest(case.DBTestCase):
    """Tests for Finder."""
    @property
    def ManageFinder(self):
        """ManageFinder; a sample finder subclass to exercise Finder."""
        from moztrap.view.manage.finders import ManageFinder
        return ManageFinder
    def test_columns_by_name(self):
        """Index of columns by name."""
        f = self.ManageFinder()
        self.assertEqual(
            sorted((n, c.name) for (n, c) in f.columns_by_name.items()),
            [
                ("products", "products"),
                ("productversions", "productversions"),
                ("runs", "runs"),
                ("suites", "suites"),
                ]
            )
    def test_parent_columns(self):
        """Maps column name to parent column."""
        f = self.ManageFinder()
        self.assertEqual(
            sorted((n, c.name) for (n, c) in f.parent_columns.items()),
            [
                ("productversions", "products"),
                ("runs", "productversions"),
                ("suites", "runs"),
                ]
            )
    def test_child_columns(self):
        """Maps column name to child column."""
        f = self.ManageFinder()
        self.assertEqual(
            sorted((n, c.name) for (n, c) in f.child_columns.items()),
            [
                ("products", "productversions"),
                ("productversions", "runs"),
                ("runs", "suites")
                ]
            )
    def test_columns_by_model(self):
        """Index of columns by model."""
        f = self.ManageFinder()
        self.assertEqual(
            sorted(
                ((m, c.name) for (m, c) in f.columns_by_model.items()),
                key=lambda o: o[1]
                ),
            [
                (self.model.Product, "products"),
                (self.model.ProductVersion, "productversions"),
                (self.model.Run, "runs"),
                (self.model.Suite, "suites"),
                ]
            )
    def test_column_template(self):
        """Joins finder base template to column template name."""
        f = self.ManageFinder()
        self.assertEqual(f.column_template("runs"), "manage/finder/_runs.html")
    def test_bad_column_name(self):
        """Bad column name raises ValueError."""
        f = self.ManageFinder()
        with self.assertRaises(ValueError):
            f.column_template("doesnotexist")
    def test_goto_url(self):
        """Goto url is manage url for child objects, filtered by parent."""
        f = self.ManageFinder()
        obj = self.model.Suite(pk=2)
        self.assertEqual(f.goto_url(obj), "/manage/cases/?filter-suite=2")
    def test_goto_url_bad_object(self):
        """Goto url returns None if given object from unknown class."""
        f = self.ManageFinder()
        self.assertEqual(f.goto_url(Mock()), None)
    def test_child_column_for_obj(self):
        """Returns child column name for given object."""
        f = self.ManageFinder()
        obj = self.model.Product()
        child_col = f.child_column_for_obj(obj)
        self.assertEqual(child_col, "productversions")
    def test_child_column_for_bad_obj(self):
        """Returns None if obj isn't of a model class in this finder."""
        f = self.ManageFinder()
        child_col = f.child_column_for_obj(Mock())
        self.assertEqual(child_col, None)
    def test_child_column_for_last_obj(self):
        """Returns None if given object from final-column class."""
        f = self.ManageFinder()
        obj = self.model.Suite()
        child_col = f.child_column_for_obj(obj)
        self.assertEqual(child_col, None)
    def test_child_query_url(self):
        """Returns ajax query url for list of child objects in next column."""
        f = self.ManageFinder()
        obj = self.model.Run(pk=5)
        url = f.child_query_url(obj)
        self.assertEqual(url, "?finder=1&col=suites&id=5")
    def test_child_query_url_none(self):
        """Returns None for final column."""
        f = self.ManageFinder()
        obj = self.model.Suite(pk=5)
        url = f.child_query_url(obj)
        self.assertEqual(url, None)
    def test_objects(self):
        """Without parent, objects is just pass-through to column objects."""
        f = self.ManageFinder()
        p = self.F.ProductFactory.create()
        objects = f.objects("products")
        self.assertEqual(list(objects), [p])
    def test_objects_of_parent(self):
        """With parent, objects filters by parent."""
        f = self.ManageFinder()
        pv = self.F.ProductVersionFactory.create()
        # A second, unrelated version that must be filtered out.
        self.F.ProductVersionFactory.create()
        objects = f.objects("productversions", pv.product.pk)
        self.assertEqual(list(objects), [pv])
    def test_parent_via_m2m(self):
        """Parent filtering also works via m2m relationship."""
        f = self.ManageFinder()
        rs = self.F.RunSuiteFactory.create()
        # A suite not attached to the run; must not appear in the result.
        self.F.SuiteFactory.create()
        objects = f.objects("suites", rs.run.pk)
        self.assertEqual(list(objects), [rs.suite])
    def test_no_parent_relationship(self):
        """If no relationship to parent model is found, raises ValueError."""
        from moztrap.view.lists.finder import Finder, Column
        # Run has no FK/m2m back to Product, so filtering must fail.
        class BadFinder(Finder):
            columns = [
                Column(
                    "products",
                    "_products.html",
                    self.model.Product.objects.all()
                    ),
                Column("runs", "_runs.html", self.model.Run.objects.all()),
                ]
        f = BadFinder()
        with self.assertRaises(ValueError):
            f.objects("runs", 1)
    def test_objects_of_no_parent(self):
        """Passing in parent for top column raises ValueError."""
        f = self.ManageFinder()
        with self.assertRaises(ValueError):
            f.objects("products", 3)
    def test_objects_bad_col(self):
        """Asking for objects of bad column raises ValueError."""
        f = self.ManageFinder()
        with self.assertRaises(ValueError):
            f.objects("doesnotexist")
class ColumnTest(case.DBTestCase):
    """Tests for finder Column."""
    @property
    def column(self):
        from moztrap.view.lists.finder import Column
        return Column
    def test_objects(self):
        """Objects method is just .all() on given queryset."""
        qs = Mock()
        c = self.column("thing", "_things.html", qs)
        objects = c.objects()
        self.assertIs(objects, qs.all.return_value)
    @patch("moztrap.view.lists.finder.filter_url")
    def test_goto_url(self, filter_url):
        """goto_url method calls filter_url if goto is given."""
        c = self.column("thing", "_things.html", Mock(), "goto_name")
        obj = Mock()
        url = c.goto_url(obj)
        self.assertIs(url, filter_url.return_value)
        filter_url.assert_called_with("goto_name", obj)
    def test_no_goto_url(self):
        """goto_url method just returns None if no goto is given."""
        c = self.column("thing", "_things.html", Mock())
        url = c.goto_url(Mock())
        self.assertIs(url, None)
| bsd-2-clause |
codecollision/DropboxToFlickr | django/contrib/localflavor/fr/fr_department.py | 314 | 3326 | # -*- coding: utf-8 -*-
DEPARTMENT_ASCII_CHOICES = (
('01', '01 - Ain'),
('02', '02 - Aisne'),
('03', '03 - Allier'),
('04', '04 - Alpes-de-Haute-Provence'),
('05', '05 - Hautes-Alpes'),
('06', '06 - Alpes-Maritimes'),
('07', '07 - Ardeche'),
('08', '08 - Ardennes'),
('09', '09 - Ariege'),
('10', '10 - Aube'),
('11', '11 - Aude'),
('12', '12 - Aveyron'),
('13', '13 - Bouches-du-Rhone'),
('14', '14 - Calvados'),
('15', '15 - Cantal'),
('16', '16 - Charente'),
('17', '17 - Charente-Maritime'),
('18', '18 - Cher'),
('19', '19 - Correze'),
('21', '21 - Cote-d\'Or'),
('22', '22 - Cotes-d\'Armor'),
('23', '23 - Creuse'),
('24', '24 - Dordogne'),
('25', '25 - Doubs'),
('26', '26 - Drome'),
('27', '27 - Eure'),
('28', '28 - Eure-et-Loire'),
('29', '29 - Finistere'),
('2A', '2A - Corse-du-Sud'),
('2B', '2B - Haute-Corse'),
('30', '30 - Gard'),
('31', '31 - Haute-Garonne'),
('32', '32 - Gers'),
('33', '33 - Gironde'),
('34', '34 - Herault'),
('35', '35 - Ille-et-Vilaine'),
('36', '36 - Indre'),
('37', '37 - Indre-et-Loire'),
('38', '38 - Isere'),
('39', '39 - Jura'),
('40', '40 - Landes'),
('41', '41 - Loir-et-Cher'),
('42', '42 - Loire'),
('43', '43 - Haute-Loire'),
('44', '44 - Loire-Atlantique'),
('45', '45 - Loiret'),
('46', '46 - Lot'),
('47', '47 - Lot-et-Garonne'),
('48', '48 - Lozere'),
('49', '49 - Maine-et-Loire'),
('50', '50 - Manche'),
('51', '51 - Marne'),
('52', '52 - Haute-Marne'),
('53', '53 - Mayenne'),
('54', '54 - Meurthe-et-Moselle'),
('55', '55 - Meuse'),
('56', '56 - Morbihan'),
('57', '57 - Moselle'),
('58', '58 - Nievre'),
('59', '59 - Nord'),
('60', '60 - Oise'),
('61', '61 - Orne'),
('62', '62 - Pas-de-Calais'),
('63', '63 - Puy-de-Dome'),
('64', '64 - Pyrenees-Atlantiques'),
('65', '65 - Hautes-Pyrenees'),
('66', '66 - Pyrenees-Orientales'),
('67', '67 - Bas-Rhin'),
('68', '68 - Haut-Rhin'),
('69', '69 - Rhone'),
('70', '70 - Haute-Saone'),
('71', '71 - Saone-et-Loire'),
('72', '72 - Sarthe'),
('73', '73 - Savoie'),
('74', '74 - Haute-Savoie'),
('75', '75 - Paris'),
('76', '76 - Seine-Maritime'),
('77', '77 - Seine-et-Marne'),
('78', '78 - Yvelines'),
('79', '79 - Deux-Sevres'),
('80', '80 - Somme'),
('81', '81 - Tarn'),
('82', '82 - Tarn-et-Garonne'),
('83', '83 - Var'),
('84', '84 - Vaucluse'),
('85', '85 - Vendee'),
('86', '86 - Vienne'),
('87', '87 - Haute-Vienne'),
('88', '88 - Vosges'),
('89', '89 - Yonne'),
('90', '90 - Territoire de Belfort'),
('91', '91 - Essonne'),
('92', '92 - Hauts-de-Seine'),
('93', '93 - Seine-Saint-Denis'),
('94', '94 - Val-de-Marne'),
('95', '95 - Val-d\'Oise'),
('971', '971 - Guadeloupe'),
('972', '972 - Martinique'),
('973', '973 - Guyane'),
('974', '974 - La Reunion'),
('975', '975 - Saint-Pierre-et-Miquelon'),
('976', '976 - Mayotte'),
('984', '984 - Terres Australes et Antarctiques'),
('986', '986 - Wallis et Futuna'),
('987', '987 - Polynesie Francaise'),
('988', '988 - Nouvelle-Caledonie'),
)
| bsd-3-clause |
Amperture/twitch-sbc-integration | streamlabs/streamlabsrun.py | 1 | 3163 | import requests
from config import STREAMLABS_SECRET, STREAMLABS_ID, STREAMLABS_REDIRECT
from decimal import Decimal
import pprint
import os
import json
import time
def streamlabs_handler(q_twitchbeagle, q_gpio):
    """Poll Streamlabs every ~10s for new donations and queue events.

    Refreshes the OAuth tokens (stored in the slrefreshtoken/slaccesstoken
    files), fetches the latest donation, and when its id differs from the
    one recorded in streamlabs_latest_donation, pushes an 'electrical'
    event and a chat message onto q_twitchbeagle.
    NOTE(review): q_gpio is accepted but never used here -- confirm.
    """
    #Grab streamlabs tokens
    headers = []
    while True:
        try:
            with open('slrefreshtoken', 'r') as f:
                r_token = f.read()
            with open('slaccesstoken', 'r') as f:
                a_token = f.read()
            token_call = {
                'grant_type' : 'refresh_token',
                'client_id' : STREAMLABS_ID,
                'client_secret' : STREAMLABS_SECRET,
                'redirect_uri' : STREAMLABS_REDIRECT,
                'refresh_token' : r_token
            }
            donations_params = {
                'access_token' : a_token,
                'limit' : 1,
                'currency' : "USD"
            }
            # Throttle the polling loop; also delays the token refresh.
            time.sleep(10)
            r = requests.post(
                'https://streamlabs.com/api/v1.0/token',
                data = token_call,
                headers = headers
            )
            a_token = r.json()['access_token']
            r_token = r.json()['refresh_token']
            # Persist the rotated tokens before using them.
            with open('slaccesstoken', 'w') as f:
                f.write(a_token)
            donations_params['access_token'] = a_token
            with open('slrefreshtoken', 'w') as f:
                f.write(r_token)
            donationsurl = "https://streamlabs.com/api/v1.0/donations"
            donate = requests.get(
                donationsurl,
                headers = headers,
                params = donations_params
            )
            #usd_two_places = float(format(usd_value, '.2f')))
            donationinfo = donate.json()['data'][0]
            #print('amount', donationinfo['amount'])
            #print('donor', donationinfo['name'])
            #print('message', donationinfo['message'])
            with open("streamlabs_latest_donation", 'r') as f:
                latestdonation = int(f.read())
            # NOTE(review): latestdonation is int while donation_id's type
            # comes from the API response -- if it is a string the two never
            # compare equal and every poll looks like a new donation; verify.
            if latestdonation != donationinfo['donation_id']:
                queueEvent = {
                    'eventType' : 'electrical',
                    'event' : 'bits'
                }
                q_twitchbeagle.put(queueEvent)
                TWOPLACES = Decimal(10) ** -2
                queueEvent = {
                    'eventType' : 'twitchchatbot',
                    'event' : 'Donation from %s for $%s.' % (
                        donationinfo['name'],
                        Decimal(donationinfo['amount']).\
                                quantize(TWOPLACES))
                }
                q_twitchbeagle.put(queueEvent)
                with open("streamlabs_latest_donation", 'w') as f:
                    print(donationinfo['donation_id'])
                    print("WE ARE WRITING TO THE FILE")
                    f.write(str(donationinfo['donation_id']))
                    print("WE HAVE WRITTEN TO THE FILE")
        except Exception,e:
            # Any failure (network, JSON shape, file IO) is printed and the
            # loop simply retries on the next iteration.
            print e
            pass
| mit |
florian-dacosta/OpenUpgrade | addons/account/project/wizard/account_analytic_inverted_balance_report.py | 378 | 2045 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_analytic_inverted_balance(osv.osv_memory):
    """Wizard collecting a date range for the inverted analytic balance report."""
    _name = 'account.analytic.inverted.balance'
    _description = 'Account Analytic Inverted Balance'
    _columns = {
        'date1': fields.date('Start of period', required=True),
        'date2': fields.date('End of period', required=True),
    }
    # Defaults: first day of the current year through today.
    _defaults = {
        'date1': lambda *a: time.strftime('%Y-01-01'),
        'date2': lambda *a: time.strftime('%Y-%m-%d')
    }
    def check_report(self, cr, uid, ids, context=None):
        """Build the report action for the analytic accounts selected in context."""
        if context is None:
            context = {}
        data = self.read(cr, uid, ids)[0]
        datas = {
            'ids': context.get('active_ids', []),
            'model': 'account.analytic.account',
            'form': data
        }
        datas['form']['active_ids'] = context.get('active_ids', False)
        return self.pool['report'].get_action(cr, uid, [], 'account.report_invertedanalyticbalance', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wxgeo/geophar | wxgeometrie/sympy/geometry/entity.py | 6 | 20514 | """The definition of the base geometrical entity with attributes common to
all derived geometrical entities.
Contains
========
GeometryEntity
GeometricSet
Notes
=====
A GeometryEntity is any object that has special geometric properties.
A GeometrySet is a superclass of any GeometryEntity that can also
be viewed as a sympy.sets.Set. In particular, points are the only
GeometryEntity not considered a Set.
Rn is a GeometrySet representing n-dimensional Euclidean space. R2 and
R3 are currently the only ambient spaces implemented.
"""
from __future__ import division, print_function
from sympy.core.compatibility import is_sequence
from sympy.core.containers import Tuple
from sympy.core.basic import Basic
from sympy.core.symbol import _symbol
from sympy.core.sympify import sympify
from sympy.functions import cos, sin
from sympy.matrices import eye
from sympy.sets import Set
from sympy.utilities.misc import func_name
from sympy.multipledispatch import dispatch
from sympy.sets.handlers.union import union_sets
from sympy.sets.handlers.intersection import intersection_sets
# How entities are ordered; used by __cmp__ in GeometryEntity
ordering_of_classes = [
"Point2D",
"Point3D",
"Point",
"Segment2D",
"Ray2D",
"Line2D",
"Segment3D",
"Line3D",
"Ray3D",
"Segment",
"Ray",
"Line",
"Plane",
"Triangle",
"RegularPolygon",
"Polygon",
"Circle",
"Ellipse",
"Curve",
"Parabola"
]
class GeometryEntity(Basic):
"""The base class for all geometrical entities.
This class doesn't represent any particular geometric entity, it only
provides the implementation of some methods common to all subclasses.
"""
def __cmp__(self, other):
"""Comparison of two GeometryEntities."""
n1 = self.__class__.__name__
n2 = other.__class__.__name__
c = (n1 > n2) - (n1 < n2)
if not c:
return 0
i1 = -1
for cls in self.__class__.__mro__:
try:
i1 = ordering_of_classes.index(cls.__name__)
break
except ValueError:
i1 = -1
if i1 == -1:
return c
i2 = -1
for cls in other.__class__.__mro__:
try:
i2 = ordering_of_classes.index(cls.__name__)
break
except ValueError:
i2 = -1
if i2 == -1:
return c
return (i1 > i2) - (i1 < i2)
def __contains__(self, other):
"""Subclasses should implement this method for anything more complex than equality."""
if type(self) == type(other):
return self == other
raise NotImplementedError()
    def __getnewargs__(self):
        """Returns a tuple that will be passed to __new__ on unpickling."""
        # args were already sympified/Tuple-wrapped by __new__, so they can
        # be fed straight back to reconstruct the entity
        return tuple(self.args)
    def __ne__(self, o):
        """Test inequality of two geometrical entities."""
        # delegate to __eq__ so the two operators cannot disagree
        return not self == o
    def __new__(cls, *args, **kwargs):
        # kwargs are accepted but not used here; subclasses consume them
        # Points are sequences, but they should not
        # be converted to Tuples, so use this detection function instead.
        def is_seq_and_not_point(a):
            # we cannot use isinstance(a, Point) since we cannot import Point
            if hasattr(a, 'is_Point') and a.is_Point:
                return False
            return is_sequence(a)
        # sympify scalars; wrap plain sequences (coordinate lists etc.) into
        # immutable Tuples so Basic can hash and compare the args
        args = [Tuple(*a) if is_seq_and_not_point(a) else sympify(a) for a in args]
        return Basic.__new__(cls, *args)
    def __radd__(self, a):
        """Implementation of reverse add method."""
        # mirror the left operand's __add__
        return a.__add__(self)
    def __rdiv__(self, a):
        """Implementation of reverse division method."""
        # Python 2 name; mirrors the left operand's __div__
        return a.__div__(self)
    def __repr__(self):
        """String representation of a GeometryEntity that can be evaluated
        by sympy."""
        # class name followed by the repr of the args tuple
        return type(self).__name__ + repr(self.args)
    def __rmul__(self, a):
        """Implementation of reverse multiplication method."""
        # mirror the left operand's __mul__
        return a.__mul__(self)
    def __rsub__(self, a):
        """Implementation of reverse substraction method."""
        # mirror the left operand's __sub__
        return a.__sub__(self)
    def __str__(self):
        """String representation of a GeometryEntity."""
        # sstr gives sympy's human-readable printing of the args
        from sympy.printing import sstr
        return type(self).__name__ + sstr(self.args)
    def _eval_subs(self, old, new):
        """Substitution hook: coerce sequence arguments to Points so that
        e.g. ``subs((0, 0), (1, 1))`` works on geometric entities."""
        from sympy.geometry.point import Point, Point3D
        if is_sequence(old) or is_sequence(new):
            # pick the Point dimension matching the entity being substituted
            if isinstance(self, Point3D):
                old = Point3D(old)
                new = Point3D(new)
            else:
                old = Point(old)
                new = Point(new)
            return self._subs(old, new)
    def _repr_svg_(self):
        """SVG representation of a GeometryEntity suitable for IPython"""
        from sympy.core.evalf import N
        try:
            bounds = self.bounds
        except (NotImplementedError, TypeError):
            # if we have no SVG representation, return None so IPython
            # will fall back to the next representation
            return None
        svg_top = '''<svg xmlns="http://www.w3.org/2000/svg"
            xmlns:xlink="http://www.w3.org/1999/xlink"
            width="{1}" height="{2}" viewBox="{0}"
            preserveAspectRatio="xMinYMin meet">
            <defs>
                <marker id="markerCircle" markerWidth="8" markerHeight="8"
                    refx="5" refy="5" markerUnits="strokeWidth">
                    <circle cx="5" cy="5" r="1.5" style="stroke: none; fill:#000000;"/>
                </marker>
                <marker id="markerArrow" markerWidth="13" markerHeight="13" refx="2" refy="4"
                       orient="auto" markerUnits="strokeWidth">
                    <path d="M2,2 L2,6 L6,4" style="fill: #000000;" />
                </marker>
                <marker id="markerReverseArrow" markerWidth="13" markerHeight="13" refx="6" refy="4"
                       orient="auto" markerUnits="strokeWidth">
                    <path d="M6,2 L6,6 L2,4" style="fill: #000000;" />
                </marker>
            </defs>'''
        # Establish SVG canvas that will fit all the data + small space
        xmin, ymin, xmax, ymax = map(N, bounds)
        if xmin == xmax and ymin == ymax:
            # This is a point; buffer using an arbitrary size
            xmin, ymin, xmax, ymax = xmin - .5, ymin -.5, xmax + .5, ymax + .5
        else:
            # Expand bounds by a fraction of the data ranges
            expand = 0.1 # or 10%; this keeps arrowheads in view (R plots use 4%)
            widest_part = max([xmax - xmin, ymax - ymin])
            expand_amount = widest_part * expand
            xmin -= expand_amount
            ymin -= expand_amount
            xmax += expand_amount
            ymax += expand_amount
        dx = xmax - xmin
        dy = ymax - ymin
        # clamp the rendered size to the 100..300 px range
        width = min([max([100., dx]), 300])
        height = min([max([100., dy]), 300])
        scale_factor = 1. if max(width, height) == 0 else max(dx, dy) / max(width, height)
        try:
            svg = self._svg(scale_factor)
        except (NotImplementedError, TypeError):
            # if we have no SVG representation, return None so IPython
            # will fall back to the next representation
            return None
        view_box = "{0} {1} {2} {3}".format(xmin, ymin, dx, dy)
        # flip the y axis: SVG y grows downwards, geometric y grows upwards
        transform = "matrix(1,0,0,-1,0,{0})".format(ymax + ymin)
        svg_top = svg_top.format(view_box, width, height)
        return svg_top + (
            '<g transform="{0}">{1}</g></svg>'
            ).format(transform, svg)
    def _svg(self, scale_factor=1., fill_color="#66cc99"):
        """Returns SVG path element for the GeometryEntity.
        Parameters
        ==========
        scale_factor : float
            Multiplication factor for the SVG stroke-width.  Default is 1.
        fill_color : str, optional
            Hex string for fill color. Default is "#66cc99".
        """
        # abstract: drawable subclasses are expected to override this
        raise NotImplementedError()
    def _sympy_(self):
        # already a SymPy object: sympify() conversion returns self unchanged
        return self
    @property
    def ambient_dimension(self):
        """What is the dimension of the space that the object is contained in?"""
        # abstract; subclasses report e.g. 2 or 3 -- not defined at this level
        raise NotImplementedError()
    @property
    def bounds(self):
        """Return a tuple (xmin, ymin, xmax, ymax) representing the bounding
        rectangle for the geometric figure.
        """
        # abstract; used by _repr_svg_ to size the canvas
        raise NotImplementedError()
    def encloses(self, o):
        """
        Return True if o is inside (not on or outside) the boundaries of self.
        The object will be decomposed into Points and individual Entities need
        only define an encloses_point method for their class.
        See Also
        ========
        sympy.geometry.ellipse.Ellipse.encloses_point
        sympy.geometry.polygon.Polygon.encloses_point
        Examples
        ========
        >>> from sympy import RegularPolygon, Point, Polygon
        >>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices)
        >>> t2 = Polygon(*RegularPolygon(Point(0, 0), 2, 3).vertices)
        >>> t2.encloses(t)
        True
        >>> t.encloses(t2)
        False
        """
        from sympy.geometry.point import Point
        from sympy.geometry.line import Segment, Ray, Line
        from sympy.geometry.ellipse import Ellipse
        from sympy.geometry.polygon import Polygon, RegularPolygon
        if isinstance(o, Point):
            return self.encloses_point(o)
        elif isinstance(o, Segment):
            return all(self.encloses_point(x) for x in o.points)
        elif isinstance(o, Ray) or isinstance(o, Line):
            # infinite objects can never be strictly inside a bounded figure
            return False
        elif isinstance(o, Ellipse):
            # center and one extreme point inside, and no boundary crossing
            return self.encloses_point(o.center) and \
                self.encloses_point(
                Point(o.center.x + o.hradius, o.center.y)) and \
                not self.intersection(o)
        elif isinstance(o, Polygon):
            if isinstance(o, RegularPolygon):
                if not self.encloses_point(o.center):
                    return False
            # a polygon is enclosed iff all of its vertices are
            return all(self.encloses_point(v) for v in o.vertices)
        raise NotImplementedError()
    def equals(self, o):
        # defaults to structural equality; subclasses may override with a
        # geometric notion of equivalence
        return self == o
    def intersection(self, o):
        """
        Returns a list of all of the intersections of self with o.
        Notes
        =====
        An entity is not required to implement this method.
        If two different types of entities can intersect, the item with
        higher index in ordering_of_classes should implement
        intersections with anything having a lower index.
        See Also
        ========
        sympy.geometry.util.intersection
        """
        # abstract at this level
        raise NotImplementedError()
    def is_similar(self, other):
        """Is this geometrical entity similar to another geometrical entity?
        Two entities are similar if a uniform scaling (enlarging or
        shrinking) of one of the entities will allow one to obtain the other.
        Notes
        =====
        This method is not intended to be used directly but rather
        through the `are_similar` function found in util.py.
        An entity is not required to implement this method.
        If two different types of entities can be similar, it is only
        required that one of them be able to determine this.
        See Also
        ========
        scale
        """
        # abstract at this level
        raise NotImplementedError()
    def reflect(self, line):
        """
        Reflects an object across a line.
        Parameters
        ==========
        line: Line
        Examples
        ========
        >>> from sympy import pi, sqrt, Line, RegularPolygon
        >>> l = Line((0, pi), slope=sqrt(2))
        >>> pent = RegularPolygon((1, 2), 1, 5)
        >>> rpent = pent.reflect(l)
        >>> rpent
        RegularPolygon(Point2D(-2*sqrt(2)*pi/3 - 1/3 + 4*sqrt(2)/3, 2/3 + 2*sqrt(2)/3 + 2*pi/3), -1, 5, -atan(2*sqrt(2)) + 3*pi/5)
        >>> from sympy import pi, Line, Circle, Point
        >>> l = Line((0, pi), slope=1)
        >>> circ = Circle(Point(0, 0), 5)
        >>> rcirc = circ.reflect(l)
        >>> rcirc
        Circle(Point2D(-pi, pi), -5)
        """
        from sympy import atan, Point, Dummy, oo
        g = self
        l = line
        o = Point(0, 0)
        if l.slope == 0:
            # horizontal mirror line y = const
            y = l.args[0].y
            if not y:  # x-axis
                return g.scale(y=-1)
            reps = [(p, p.translate(y=2*(y - p.y))) for p in g.atoms(Point)]
        elif l.slope == oo:
            # vertical mirror line x = const
            x = l.args[0].x
            if not x:  # y-axis
                return g.scale(x=-1)
            reps = [(p, p.translate(x=2*(x - p.x))) for p in g.atoms(Point)]
        else:
            if not hasattr(g, 'reflect') and not all(
                    isinstance(arg, Point) for arg in g.args):
                raise NotImplementedError(
                    'reflect undefined or non-Point args in %s' % g)
            a = atan(l.slope)
            c = l.coefficients
            d = -c[-1]/c[1]  # y-intercept
            # apply the transform to a single point:
            # shift the line to the origin, rotate it onto the x-axis,
            # mirror across the x-axis, then undo the rotation and shift
            x, y = Dummy(), Dummy()
            xf = Point(x, y)
            xf = xf.translate(y=-d).rotate(-a, o).scale(y=-1
                ).rotate(a, o).translate(y=d)
            # replace every point using that transform
            reps = [(p, xf.xreplace({x: p.x, y: p.y})) for p in g.atoms(Point)]
        return g.xreplace(dict(reps))
def rotate(self, angle, pt=None):
"""Rotate ``angle`` radians counterclockwise about Point ``pt``.
The default pt is the origin, Point(0, 0)
See Also
========
scale, translate
Examples
========
>>> from sympy import Point, RegularPolygon, Polygon, pi
>>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices)
>>> t # vertex on x axis
Triangle(Point2D(1, 0), Point2D(-1/2, sqrt(3)/2), Point2D(-1/2, -sqrt(3)/2))
>>> t.rotate(pi/2) # vertex on y axis now
Triangle(Point2D(0, 1), Point2D(-sqrt(3)/2, -1/2), Point2D(sqrt(3)/2, -1/2))
"""
newargs = []
for a in self.args:
if isinstance(a, GeometryEntity):
newargs.append(a.rotate(angle, pt))
else:
newargs.append(a)
return type(self)(*newargs)
    def scale(self, x=1, y=1, pt=None):
        """Scale the object by multiplying the x,y-coordinates by x and y.
        If pt is given, the scaling is done relative to that point; the
        object is shifted by -pt, scaled, and shifted by pt.
        See Also
        ========
        rotate, translate
        Examples
        ========
        >>> from sympy import RegularPolygon, Point, Polygon
        >>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices)
        >>> t
        Triangle(Point2D(1, 0), Point2D(-1/2, sqrt(3)/2), Point2D(-1/2, -sqrt(3)/2))
        >>> t.scale(2)
        Triangle(Point2D(2, 0), Point2D(-1, sqrt(3)/2), Point2D(-1, -sqrt(3)/2))
        >>> t.scale(2, 2)
        Triangle(Point2D(2, 0), Point2D(-1, sqrt(3)), Point2D(-1, -sqrt(3)))
        """
        from sympy.geometry.point import Point
        if pt:
            # scale about pt: shift to origin, scale, shift back
            pt = Point(pt, dim=2)
            return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
        return type(self)(*[a.scale(x, y) for a in self.args])  # if this fails, override this class
def translate(self, x=0, y=0):
"""Shift the object by adding to the x,y-coordinates the values x and y.
See Also
========
rotate, scale
Examples
========
>>> from sympy import RegularPolygon, Point, Polygon
>>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices)
>>> t
Triangle(Point2D(1, 0), Point2D(-1/2, sqrt(3)/2), Point2D(-1/2, -sqrt(3)/2))
>>> t.translate(2)
Triangle(Point2D(3, 0), Point2D(3/2, sqrt(3)/2), Point2D(3/2, -sqrt(3)/2))
>>> t.translate(2, 2)
Triangle(Point2D(3, 2), Point2D(3/2, sqrt(3)/2 + 2),
Point2D(3/2, -sqrt(3)/2 + 2))
"""
newargs = []
for a in self.args:
if isinstance(a, GeometryEntity):
newargs.append(a.translate(x, y))
else:
newargs.append(a)
return self.func(*newargs)
    def parameter_value(self, other, t):
        """Return the parameter corresponding to the given point.
        Evaluating an arbitrary point of the entity at this parameter
        value will return the given point.
        Examples
        ========
        >>> from sympy import Line, Point
        >>> from sympy.abc import t
        >>> a = Point(0, 0)
        >>> b = Point(2, 2)
        >>> Line(a, b).parameter_value((1, 1), t)
        {t: 1/2}
        >>> Line(a, b).arbitrary_point(t).subs(_)
        Point2D(1, 1)
        """
        from sympy.geometry.point import Point
        from sympy.core.symbol import Dummy
        from sympy.solvers.solvers import solve
        if not isinstance(other, GeometryEntity):
            other = Point(other, dim=self.ambient_dimension)
        if not isinstance(other, Point):
            raise ValueError("other must be a point")
        # solve with a Dummy to avoid clashing with symbols in the entity,
        # then report the result under the caller's symbol t
        T = Dummy('t', real=True)
        sol = solve(self.arbitrary_point(T) - other, T, dict=True)
        if not sol:
            raise ValueError("Given point is not on %s" % func_name(self))
        return {t: sol[0][T]}
class GeometrySet(GeometryEntity, Set):
    """Parent class of all GeometryEntity that are also Sets
    (compatible with sympy.sets)
    """
    def _contains(self, other):
        """sympy.sets uses the _contains method, so include it for compatibility."""
        # a FiniteSet is contained iff every one of its elements is
        if isinstance(other, Set) and other.is_FiniteSet:
            return all(self.__contains__(i) for i in other)
        return self.__contains__(other)
@dispatch(GeometrySet, Set)
def union_sets(self, o):
    """ Returns the union of self and o
    for use with sympy.sets.Set, if possible. """
    # registered via @dispatch on the union_sets imported above;
    # returning None signals "no simplification available"
    from sympy.sets import Union, FiniteSet
    # if its a FiniteSet, merge any points
    # we contain and return a union with the rest
    if o.is_FiniteSet:
        other_points = [p for p in o if not self._contains(p)]
        if len(other_points) == len(o):
            # nothing absorbed: no simplification
            return None
        return Union(self, FiniteSet(*other_points))
    if self._contains(o):
        return self
    return None
@dispatch(GeometrySet, Set)
def intersection_sets(self, o):
    """ Returns a sympy.sets.Set of intersection objects,
    if possible. """
    from sympy.sets import Set, FiniteSet, Union
    from sympy.geometry import Point
    try:
        # if o is a FiniteSet, find the intersection directly
        # to avoid infinite recursion
        if o.is_FiniteSet:
            inter = FiniteSet(*(p for p in o if self.contains(p)))
        else:
            inter = self.intersection(o)
    except NotImplementedError:
        # sympy.sets.Set.reduce expects None if an object
        # doesn't know how to simplify
        return None
    # put the points in a FiniteSet
    points = FiniteSet(*[p for p in inter if isinstance(p, Point)])
    # non-Point results (segments, curves, ...) stay as-is in the Union
    non_points = [p for p in inter if not isinstance(p, Point)]
    return Union(*(non_points + [points]))
def translate(x, y):
    """Return the 3x3 matrix that translates a 2-D point by x and y
    (homogeneous coordinates, row-vector convention)."""
    mat = eye(3)
    mat[2, 0], mat[2, 1] = x, y
    return mat
def scale(x, y, pt=None):
    """Return the matrix to multiply a 2-D point's coordinates by x and y.
    If pt is given, the scaling is done relative to that point."""
    rv = eye(3)
    rv[0, 0] = x
    rv[1, 1] = y
    if pt:
        # conjugate with translations: shift pt to the origin, scale, shift back
        from sympy.geometry.point import Point
        pt = Point(pt, dim=2)
        tr1 = translate(*(-pt).args)
        tr2 = translate(*pt.args)
        return tr1*rv*tr2
    return rv
def rotate(th):
    """Return the matrix to rotate a 2-D point about the origin by ``angle``.
    The angle is measured in radians. To rotate a point about a point other
    than the origin, translate the point, do the rotation, and
    translate it back:
    >>> from sympy.geometry.entity import rotate, translate
    >>> from sympy import Point, pi
    >>> rot_about_11 = translate(-1, -1)*rotate(pi/2)*translate(1, 1)
    >>> Point(1, 1).transform(rot_about_11)
    Point2D(1, 1)
    >>> Point(0, 0).transform(rot_about_11)
    Point2D(2, 0)
    """
    s = sin(th)
    # eye(3)*cos puts cos(th) on the whole diagonal; the homogeneous
    # entry [2, 2] is then reset to 1 below
    rv = eye(3)*cos(th)
    rv[0, 1] = s
    rv[1, 0] = -s
    rv[2, 2] = 1
    return rv
| gpl-2.0 |
OWASP/django-DefectDojo | dojo/search/views.py | 2 | 7676 | import logging
from django.shortcuts import render
from tagging.models import TaggedItem
from watson import search as watson
from django.db.models import Q
from dojo.forms import SimpleSearchForm
from dojo.models import Finding, Finding_Template, Product, Test, Endpoint, Engagement, Languages, \
App_Analysis
from dojo.utils import add_breadcrumb
logger = logging.getLogger(__name__)
def simple_search(request):
    """Site-wide search view.

    The query may be prefixed with an operator such as ``finding:``,
    ``endpoint:``, ``tag:`` ... to restrict the search to one model
    family; with no operator every section is searched.  Staff users
    search globally; other users only see objects they are authorized
    for.  The matched querysets are handed to the template per section.
    """
    tests = None
    findings = None
    finding_templates = None
    products = None
    tagged_tests = None
    tagged_findings = None
    tagged_products = None
    tagged_endpoints = None
    tagged_engagements = None
    tagged_finding_templates = None
    engagements = None
    endpoints = None
    languages = None
    app_analysis = None
    clean_query = ''
    cookie = False
    form = SimpleSearchForm()
    if request.method == 'GET' and "query" in request.GET:
        form = SimpleSearchForm(request.GET)
        if form.is_valid():
            cookie = True
            clean_query = form.cleaned_data['query']
            search_operator = ""
            # Check for search operator like finding:, endpoint:, test: product:
            if ":" in clean_query:
                # split only on the first colon so queries such as
                # "endpoint:http://host/path" keep the remainder intact
                operator = clean_query.split(":", 1)
                search_operator = operator[0]
                clean_query = operator[1].lstrip()
            tags = clean_query
            # NOTE: '== ""' (not 'is ""'): identity comparison with a string
            # literal is implementation-dependent and a SyntaxWarning on 3.8+
            if request.user.is_staff:
                if "finding" in search_operator or search_operator == "":
                    findings = watson.search(clean_query, models=(Finding,))
                if "template" in search_operator or search_operator == "":
                    finding_templates = watson.search(clean_query, models=(Finding_Template,))
                if "test" in search_operator or search_operator == "":
                    tests = watson.search(clean_query, models=(Test,))
                if "product" in search_operator or search_operator == "":
                    products = watson.search(clean_query, models=(Product,))
                if "tag" in search_operator or search_operator == "":
                    tagged_findings = TaggedItem.objects.get_by_model(Finding,
                                                                      tags)
                    tagged_finding_templates = TaggedItem.objects.get_by_model(Finding_Template,
                                                                               tags)
                    tagged_tests = TaggedItem.objects.get_by_model(Test, tags)
                    tagged_products = TaggedItem.objects.get_by_model(Product,
                                                                      tags)
                    tagged_endpoints = TaggedItem.objects.get_by_model(Endpoint,
                                                                       tags)
                    tagged_engagements = TaggedItem.objects.get_by_model(
                        Engagement, tags)
                # endpoints = watson.search(clean_query, models=(Endpoint,))
                if "endpoint" in search_operator or search_operator == "":
                    endpoints = Endpoint.objects.filter(Q(host__icontains=clean_query) | Q(path__icontains=clean_query) | Q(fqdn__icontains=clean_query) | Q(protocol__icontains=clean_query))
                if "engagement" in search_operator or search_operator == "":
                    engagements = watson.search(clean_query, models=(Engagement,))
                if "language" in search_operator or search_operator == "":
                    languages = Languages.objects.filter(language__language__icontains=clean_query)
                if "technology" in search_operator or search_operator == "":
                    app_analysis = App_Analysis.objects.filter(name__icontains=clean_query)
            else:
                # non-staff users: restrict every lookup to objects whose
                # product lists them as an authorized user
                if "finding" in search_operator or search_operator == "":
                    findings = watson.search(clean_query, models=(
                        Finding.objects.filter(
                            test__engagement__product__authorized_users__in=[
                                request.user]),))
                if "template" in search_operator or search_operator == "":
                    finding_templates = watson.search(clean_query, models=(
                        Finding_Template.objects.filter(
                            authorized_users__in=[
                                request.user]),))
                if "test" in search_operator or search_operator == "":
                    tests = watson.search(
                        clean_query,
                        models=(Test.objects.filter(
                            engagement__product__authorized_users__in=[
                                request.user]),))
                if "product" in search_operator or search_operator == "":
                    products = watson.search(clean_query, models=(
                        Product.objects.filter(authorized_users__in=[
                            request.user]),))
                if "tag" in search_operator or search_operator == "":
                    tagged_findings = TaggedItem.objects.get_by_model(
                        Finding.objects.filter(
                            test__engagement__product__authorized_users__in=[
                                request.user]), tags)
                    tagged_finding_templates = TaggedItem.objects.get_by_model(
                        Finding_Template.objects.filter(
                            authorized_users__in=[
                                request.user]), tags)
                    tagged_tests = TaggedItem.objects.get_by_model(
                        Test.objects.filter(
                            engagement__product__authorized_users__in=[
                                request.user]), tags)
                    tagged_products = TaggedItem.objects.get_by_model(
                        Product.objects.filter(
                            authorized_users__in=[request.user]), tags)
                    tagged_endpoints = TaggedItem.objects.get_by_model(
                        Endpoint.objects.filter(
                            product__authorized_users__in=[request.user]), tags)
                    tagged_engagements = TaggedItem.objects.get_by_model(
                        Engagement.objects.filter(
                            product__authorized_users__in=[request.user]), tags)
    else:
        form = SimpleSearchForm()
    add_breadcrumb(title="Simple Search", top_level=True, request=request)
    response = render(request, 'dojo/simple_search.html', {
        'clean_query': clean_query,
        'languages': languages,
        'app_analysis': app_analysis,
        'tests': tests,
        'findings': findings,
        'finding_templates': finding_templates,
        'products': products,
        'tagged_tests': tagged_tests,
        'tagged_findings': tagged_findings,
        'tagged_finding_templates': tagged_finding_templates,
        'tagged_products': tagged_products,
        'tagged_endpoints': tagged_endpoints,
        'tagged_engagements': tagged_engagements,
        'engagements': engagements,
        'endpoints': endpoints,
        'name': 'Simple Search',
        'metric': False,
        'user': request.user,
        'form': form})
    # remember the query so the result page can highlight the matched terms
    if cookie:
        response.set_cookie("highlight", value=clean_query,
                            max_age=None, expires=None,
                            path='/', secure=True, httponly=False)
    else:
        response.delete_cookie("highlight", path='/')
    return response
| bsd-3-clause |
PowerDNS/exabgp | lib/exabgp/bgp/message/open/capability/addpath.py | 2 | 1640 | # encoding: utf-8
"""
addpath.py
Created by Thomas Mangin on 2012-07-17.
Copyright (c) 2009-2013 Exa Networks. All rights reserved.
"""
from struct import pack
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
from exabgp.bgp.message.open.capability import Capability
# ====================================================================== AddPath
#
class AddPath (Capability,dict):
	"""BGP ADD-PATH capability: maps (afi, safi) -> send/receive mode byte."""
	ID = Capability.ID.ADD_PATH
	# human-readable names for the send/receive wire values
	string = {
		0 : 'disabled',
		1 : 'receive',
		2 : 'send',
		3 : 'send/receive',
	}
	def __init__ (self,families=(),send_receive=0):
		for afi,safi in families:
			self.add_path(afi,safi,send_receive)
	def add_path (self,afi,safi,send_receive):
		self[(afi,safi)] = send_receive
	def __str__ (self):
		return 'AddPath(' + ','.join(["%s %s %s" % (self.string[self[aafi]],xafi,xsafi) for (aafi,xafi,xsafi) in [((afi,safi),str(afi),str(safi)) for (afi,safi) in self]]) + ')'
	def json (self):
		families = ','.join('"%s/%s": "%s"' % (xafi,xsafi,self.string[self[aafi]]) for (aafi,xafi,xsafi) in (((afi,safi),str(afi),str(safi)) for (afi,safi) in self))
		return '{ "name": "addpath"%s%s }' % (', ' if families else '', families)
	def extract (self):
		# one 4-byte entry per family with a non-zero mode:
		# afi (2 bytes) + safi (1 byte) + send/receive (1 byte)
		rs = []
		for v in self:
			if self[v]:
				rs.append(v[0].pack() +v[1].pack() + pack('!B',self[v]))
		return rs
	@staticmethod
	def unpack (capability,instance,data):
		# XXX: FIXME: should check that we have not yet seen the capability
		# Python 2 str-as-bytes parsing; ord() reads the mode byte
		while data:
			afi = AFI.unpack(data[:2])
			safi = SAFI.unpack(data[2])
			sr = ord(data[3])
			instance.add_path(afi,safi,sr)
			data = data[4:]
		return instance
AddPath.register_capability()
| bsd-3-clause |
itbabu/saleor | saleor/cart/migrations/0001_initial.py | 7 | 3072 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
import django.core.validators
import django_prices.models
import satchless.item
import uuid
class Migration(migrations.Migration):
    """Auto-generated initial migration for the cart app: creates Cart and
    CartLine and the (cart, product, data) uniqueness constraint.
    Auto-generated: do not hand-edit the operations."""
    dependencies = [
        ('product', '0012_auto_20160218_0812'),
        ('discount', '0003_auto_20160207_0534'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('status', models.CharField(default='open', max_length=32, verbose_name='order status', choices=[('open', 'Open - currently active'), ('payment', 'Waiting for payment'), ('saved', 'Saved - for items to be purchased later'), ('ordered', 'Submitted - has been ordered at the checkout'), ('checkout', 'Checkout - basket is processed in checkout'), ('canceled', 'Canceled - basket was canceled by user')])),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
                ('last_status_change', models.DateTimeField(auto_now_add=True, verbose_name='last status change')),
                ('email', models.EmailField(max_length=254, null=True, blank=True)),
                ('token', models.UUIDField(primary_key=True, default=uuid.uuid4, serialize=False, editable=False, verbose_name='token')),
                ('checkout_data', models.TextField(null=True, editable=False)),
                ('total', django_prices.models.PriceField(default=0, currency='USD', max_digits=12, decimal_places=2)),
                ('quantity', models.PositiveIntegerField(default=0)),
                ('user', models.ForeignKey(related_name='carts', verbose_name='user', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('voucher', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to='discount.Voucher', null=True)),
            ],
            options={
                'ordering': ('-last_status_change',),
            },
            bases=(models.Model, satchless.item.ItemSet),
        ),
        migrations.CreateModel(
            name='CartLine',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('quantity', models.PositiveIntegerField(verbose_name='quantity', validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(999)])),
                ('data', models.TextField(default='{}', blank=True)),
                ('cart', models.ForeignKey(related_name='lines', to='cart.Cart')),
                ('product', models.ForeignKey(related_name='+', verbose_name='product', to='product.ProductVariant')),
            ],
            bases=(models.Model, satchless.item.ItemLine),
        ),
        migrations.AlterUniqueTogether(
            name='cartline',
            unique_together=set([('cart', 'product', 'data')]),
        ),
    ]
| bsd-3-clause |
lirui0081/depotwork | depotwork/auth/forms.py | 1 | 3788 | from django import forms
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from depotwork.settings import ALLOWED_SIGNUP_DOMAINS
def SignupDomainValidator(value):
    """Reject email addresses whose domain is not whitelisted.

    A '*' entry in ALLOWED_SIGNUP_DOMAINS allows any domain.  Raises
    ValidationError for a non-whitelisted domain or a value with no '@'.
    """
    if '*' in ALLOWED_SIGNUP_DOMAINS:
        return
    message = u'Invalid domain. Allowed domains on this network: {0}'.format(','.join(ALLOWED_SIGNUP_DOMAINS))
    try:
        # domain includes the leading '@' to match entries like '@example.com'
        domain = value[value.index("@"):]
    except ValueError:
        # no '@' at all -- same user-facing error as a bad domain
        raise ValidationError(message)
    if domain not in ALLOWED_SIGNUP_DOMAINS:
        raise ValidationError(message)
def ForbiddenUsernamesValidator(value):
    """Reject usernames that collide with reserved routes or words.

    Comparison is case-insensitive.  Raises ValidationError on a match.
    """
    # frozenset gives O(1) membership and drops the duplicates the old
    # list carried ('blog' and 'blogs' were listed twice)
    forbidden_usernames = frozenset([
        'admin', 'settings', 'news', 'about', 'help', 'signin', 'signup',
        'signout', 'terms', 'privacy', 'cookie', 'new', 'login', 'logout',
        'administrator', 'join', 'account', 'username', 'root', 'blog',
        'user', 'users', 'billing', 'subscribe', 'reviews', 'review',
        'blogs', 'edit', 'mail', 'email', 'home', 'job', 'jobs',
        'contribute', 'newsletter', 'shop', 'profile', 'register', 'auth',
        'authentication', 'campaign', 'config', 'delete', 'remove', 'forum',
        'forums', 'download', 'downloads', 'contact', 'feed', 'feeds',
        'faq', 'intranet', 'log', 'registration', 'search', 'explore',
        'rss', 'support', 'status', 'static', 'media', 'setting', 'css',
        'js', 'follow', 'activity', 'questions', 'articles', 'network',
    ])
    if value.lower() in forbidden_usernames:
        raise ValidationError('This is a reserved word.')
def InvalidUsernameValidator(value):
    """Reject usernames containing '@', '+' or '-' characters."""
    if any(ch in value for ch in '@+-'):
        raise ValidationError('Enter a valid username.')
def UniqueEmailValidator(value):
    # case-insensitive match so Foo@x.com and foo@x.com collide
    if User.objects.filter(email__iexact=value).exists():
        raise ValidationError('User with this Email already exists.')
def UniqueUsernameIgnoreCaseValidator(value):
    # case-insensitive match so 'Alice' and 'alice' are the same account
    if User.objects.filter(username__iexact=value).exists():
        raise ValidationError('User with this Username already exists.')
class SignUpForm(forms.ModelForm):
    """Registration form: stacks the custom validators defined above onto
    username/email and checks that the two password fields agree."""
    username = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}),
                               max_length=30,
                               required=True,
                               help_text='Usernames may contain <strong>alphanumeric</strong>, <strong>_</strong> and <strong>.</strong> characters')
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class':'form-control'}))
    confirm_password = forms.CharField(widget=forms.PasswordInput(attrs={'class':'form-control'}),
                                       label="Confirm your password",
                                       required=True)
    email = forms.CharField(widget=forms.EmailInput(attrs={'class':'form-control'}),
                            required=True,
                            max_length=75)
    class Meta:
        model = User
        exclude = ['last_login', 'date_joined']
        fields = ['username', 'email', 'password', 'confirm_password',]
    def __init__(self, *args, **kwargs):
        super(SignUpForm, self).__init__(*args, **kwargs)
        # attach the module-level validators to the generated fields
        self.fields['username'].validators.append(ForbiddenUsernamesValidator)
        self.fields['username'].validators.append(InvalidUsernameValidator)
        self.fields['username'].validators.append(UniqueUsernameIgnoreCaseValidator)
        self.fields['email'].validators.append(UniqueEmailValidator)
        self.fields['email'].validators.append(SignupDomainValidator)
    def clean(self):
        super(SignUpForm, self).clean()
        password = self.cleaned_data.get('password')
        confirm_password = self.cleaned_data.get('confirm_password')
        # report a mismatch on the 'password' field rather than form-wide
        if password and password != confirm_password:
            self._errors['password'] = self.error_class(['Passwords don\'t match'])
        return self.cleaned_data
mantaraya36/xmms2-mantaraya36 | wafadmin/Tools/kde4.py | 19 | 2339 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
import os, sys, re
import Options, TaskGen, Task, Utils
from TaskGen import taskgen, feature, after
class msgfmt_taskgen(TaskGen.task_gen):
	# thin named subclass so the 'msgfmt' feature methods below can be
	# attached to a dedicated task generator
	def __init__(self, *k, **kw):
		TaskGen.task_gen.__init__(self, *k, **kw)
@feature('msgfmt')
def init_msgfmt(self):
	#langs = '' # for example "foo/fr foo/br"
	# default install location for the compiled .mo catalogs
	self.default_install_path = '${KDE4_LOCALE_INSTALL_DIR}'
@feature('msgfmt')
@after('init_msgfmt')
def apply_msgfmt(self):
	# one msgfmt task per listed language: compiles <lang>.po to <lang>.mo
	for lang in self.to_list(self.langs):
		node = self.path.find_resource(lang+'.po')
		task = self.create_task('msgfmt', node, node.change_ext('.mo'))
		if not self.bld.is_install: continue
		# install as <appname>.mo under <locale>/LC_MESSAGES
		langname = lang.split('/')
		langname = langname[-1]
		task.install_path = self.install_path + os.sep + langname + os.sep + 'LC_MESSAGES'
		task.filename = getattr(self, 'appname', 'set_your_appname') + '.mo'
		task.chmod = self.chmod
def detect(conf):
	"""Configure KDE4: scrape KDELibsDependencies.cmake for install paths
	and register the kde libraries and the msgfmt tool in conf.env."""
	kdeconfig = conf.find_program('kde4-config')
	if not kdeconfig:
		conf.fatal('we need kde4-config')
	prefix = Utils.cmd_output('%s --prefix' % kdeconfig, silent=True).strip()
	file = '%s/share/apps/cmake/modules/KDELibsDependencies.cmake' % prefix
	try: os.stat(file)
	except OSError:
		# alternate layout used by some distributions
		file = '%s/share/kde4/apps/cmake/modules/KDELibsDependencies.cmake' % prefix
		try: os.stat(file)
		except OSError: conf.fatal('could not open %s' % file)
	try:
		txt = Utils.readf(file)
	except (OSError, IOError):
		conf.fatal('could not read %s' % file)
	# join continuation lines and strip cmake comments before matching
	txt = txt.replace('\\\n', '\n')
	fu = re.compile('#(.*)\n')
	txt = fu.sub('', txt)
	# match cmake 'set(KEY "value")' lines, case-insensitively
	setregexp = re.compile('([sS][eE][tT]\s*\()\s*([^\s]+)\s+\"([^"]+)\"\)')
	found = setregexp.findall(txt)
	for (_, key, val) in found:
		#print key, val
		conf.env[key] = val
	# well well, i could just write an interpreter for cmake files
	conf.env['LIB_KDECORE']='kdecore'
	conf.env['LIB_KDEUI'] ='kdeui'
	conf.env['LIB_KIO'] ='kio'
	conf.env['LIB_KHTML'] ='khtml'
	conf.env['LIB_KPARTS'] ='kparts'
	conf.env['LIBPATH_KDECORE'] = conf.env['KDE4_LIB_INSTALL_DIR']
	conf.env['CPPPATH_KDECORE'] = conf.env['KDE4_INCLUDE_INSTALL_DIR']
	conf.env.append_value('CPPPATH_KDECORE', conf.env['KDE4_INCLUDE_INSTALL_DIR']+"/KDE")
	conf.env['MSGFMT'] = conf.find_program('msgfmt')
Task.simple_task_type('msgfmt', '${MSGFMT} ${SRC} -o ${TGT}', color='BLUE', shell=False)
| lgpl-2.1 |
ffsdmad/af-web | cgi-bin/app.py | 1 | 1483 | #!/usr/bin/env python
import config
import os,sys,inspect
import ffcgi
#from base import bd_sql
#from user import user
class run_plugins():
    """CGI plugin runner (Python 2): imports every *.py module from
    *plugin_dir* whose name appears in the 'plugins' CGI parameter and
    instantiates each class found in it."""
    def print_message(self, text):
        # minimal CGI response: header plus body
        print "Content-Type: text/html; charset=utf8\n\n"
        print text
    def load_plugins(self, plugin_dir):
        """Import the requested plugin modules.
        Returns (module_names, package_objects, count)."""
        modules, packages_obj, i = [], [], 0
        for fname in os.listdir(plugin_dir):
            if fname.endswith (".py"):
                module_name = fname[:-3]
                if module_name in ffcgi.cgi_value("plugins") and module_name != "__init__":
                    try:
                        # __import__ returns the top-level package; the
                        # submodule is fetched with getattr in __init__
                        packages_obj.append( __import__(plugin_dir + "." + module_name) )
                        modules.append( module_name )
                        i += 1
                    except Exception, e:
                        self.print_message( e)
                        print "error:",plugin_dir + "." + module_name
        return modules, packages_obj, i
    def __init__(self, plugin_dir = "plugins"):
        modules, packages_obj, count = self.load_plugins(plugin_dir)
        if count:
            for i in range(0,count):
                module_obj = getattr (packages_obj[i], modules[i])
                for elem in dir (module_obj):
                    Clas = getattr (module_obj, elem)
                    if inspect.isclass (Clas):
                        # instantiating the class is what runs the plugin
                        obj=Clas()
        else:
            self.print_message( "no run" )
if __name__ == '__main__':
    run_plugins()
| gpl-3.0 |
sujeet4github/MyLangUtils | LangPython/oreilly-intro-to-flask-video/venv/lib/python3.6/site-packages/jinja2/defaults.py | 130 | 1323 | # -*- coding: utf-8 -*-
"""
jinja2.defaults
~~~~~~~~~~~~~~~
Jinja default filters and tags.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2._compat import range_type
from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner
# defaults for the parser / lexer
BLOCK_START_STRING = '{%'
BLOCK_END_STRING = '%}'
VARIABLE_START_STRING = '{{'
VARIABLE_END_STRING = '}}'
COMMENT_START_STRING = '{#'
COMMENT_END_STRING = '#}'
LINE_STATEMENT_PREFIX = None
LINE_COMMENT_PREFIX = None
TRIM_BLOCKS = False
LSTRIP_BLOCKS = False
NEWLINE_SEQUENCE = '\n'
KEEP_TRAILING_NEWLINE = False

# default filters, tests and namespace
from jinja2.filters import FILTERS as DEFAULT_FILTERS
from jinja2.tests import TESTS as DEFAULT_TESTS
DEFAULT_NAMESPACE = {
    'range': range_type,
    'dict': dict,
    'lipsum': generate_lorem_ipsum,
    'cycler': Cycler,
    'joiner': Joiner
}

# default policies
DEFAULT_POLICIES = {
    'compiler.ascii_str': True,
    'urlize.rel': 'noopener',
    'urlize.target': None,
    'truncate.leeway': 5,
    'json.dumps_function': None,
    'json.dumps_kwargs': {'sort_keys': True},
}

# export all constants (every UPPERCASE name defined above)
__all__ = tuple(x for x in locals().keys() if x.isupper())
| gpl-3.0 |
oasiswork/odoo | openerp/service/common.py | 281 | 1873 | # -*- coding: utf-8 -*-
import logging
import openerp.release
import openerp.tools
from openerp.tools.translate import _
import security
_logger = logging.getLogger(__name__)

# Static version/protocol descriptor returned by the 'version' RPC call.
RPC_VERSION_1 = {
    'server_version': openerp.release.version,
    'server_version_info': openerp.release.version_info,
    'server_serie': openerp.release.serie,
    'protocol_version': 1,
}
def dispatch(method, params):
    """Dispatch an RPC *method* call to its ``exp_<method>`` handler.

    Only a fixed whitelist of method names is accepted; anything else
    raises ``Exception("Method not found: <name>")``.
    """
    allowed = ('login', 'about', 'timezone_get',
               'version', 'authenticate', 'set_loglevel')
    if method not in allowed:
        raise Exception("Method not found: %s" % method)
    handler = globals()['exp_' + method]
    return handler(*params)
def exp_login(db, login, password):
    """Authenticate *login* against database *db*.

    Returns the user id on success, otherwise False.
    """
    # TODO: legacy indirection through 'security', should use directly
    # the res.users model
    uid = security.login(db, login, password)
    outcome = 'successful login' if uid else 'bad login or password'
    _logger.info("%s from '%s' using database '%s'", outcome, login, db.lower())
    return uid or False
def exp_authenticate(db, login, password, user_agent_env):
    """Full authentication (with client environment info) via res.users."""
    registry = openerp.registry(db)
    return registry['res.users'].authenticate(db, login, password, user_agent_env)
def exp_version():
    """Return the static server version/protocol descriptor."""
    return RPC_VERSION_1
def exp_about(extended=False):
    """Return information about the OpenERP Server.

    @param extended: if True then return version info
    @return string if extended is False else tuple
    """
    info = _('See http://openerp.com')
    if not extended:
        return info
    return info, openerp.release.version
def exp_timezone_get(db, login, password):
    """Return the server's timezone."""
    # Credentials are accepted for RPC API compatibility but not checked here.
    return openerp.tools.misc.get_server_timezone()
def exp_set_loglevel(loglevel, logger=None):
    """Deprecated no-op kept for RPC backward compatibility; returns True."""
    # TODO Previously, the level was set on the now deprecated
    # `openerp.netsvc.Logger` class.
    return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
palaniyappanBala/androguard | androguard/core/analysis/analysis.py | 10 | 70640 | # This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re, random, cPickle, collections
from androguard.core.androconf import error, warning, debug, is_ascii_problem,\
load_api_specific_resource_module
from androguard.core.bytecodes import dvm
from androguard.core.bytecodes.api_permissions import DVM_PERMISSIONS_BY_PERMISSION, DVM_PERMISSIONS_BY_ELEMENT
class ContextField(object):
    """Holds an access mode plus an accumulated list of detail entries."""

    def __init__(self, mode):
        self.mode = mode
        self.details = []

    def set_details(self, details):
        # Accumulates entries; it does not replace previous ones.
        self.details.extend(details)
class ContextMethod(object):
    """Accumulates detail entries describing a method context."""

    def __init__(self):
        self.details = []

    def set_details(self, details):
        # Accumulates entries; it does not replace previous ones.
        self.details.extend(details)
class ExternalFM(object):
    """Simple record for an external field/method reference.

    Stores the defining class name, the member name and its descriptor.
    """

    def __init__(self, class_name, name, descriptor):
        self.class_name = class_name
        self.name = name
        self.descriptor = descriptor

    def get_class_name(self):
        """Return the defining class name."""
        return self.class_name

    def get_name(self):
        """Return the member name."""
        return self.name

    def get_descriptor(self):
        """Return the member descriptor."""
        return self.descriptor
class ToString(object):
    """Builds a compact tag string by matching pushed names against regexes.

    *tab* maps a single-character tag to a list of regex patterns.  Each
    push() appends the tag for every pattern the name matches, except that
    consecutive 'O' tags are collapsed into one.
    """

    def __init__(self, tab):
        self.__tab = tab
        # Pre-compile every pattern once, keyed by its tag.
        self.__re_tab = {tag: [re.compile(pattern) for pattern in patterns]
                         for tag, patterns in tab.items()}
        self.__string = ""

    def push(self, name):
        """Match *name* against all patterns, appending tags to the string."""
        for tag in self.__tab:
            for pattern in self.__re_tab[tag]:
                if pattern.match(name) is None:
                    continue
                # Collapse runs of 'O' tags into a single one.
                if self.__string and tag == 'O' and self.__string[-1] == 'O':
                    continue
                self.__string += tag

    def get_string(self):
        """Return the accumulated tag string."""
        return self.__string
class BreakBlock(object):
    """A contiguous run of instructions plus the fields/methods it touches."""

    def __init__(self, _vm, idx):
        self._vm = _vm
        self._start = idx          # offset of the first instruction
        self._end = self._start    # advanced by push()

        self._ins = []
        self._ops = []

        self._fields = {}
        self._methods = {}

    def get_ops(self):
        return self._ops

    def get_fields(self):
        return self._fields

    def get_methods(self):
        return self._methods

    def push(self, ins):
        # Append an instruction and grow the end offset by its byte length.
        self._ins.append(ins)
        self._end += ins.get_length()

    def get_start(self):
        return self._start

    def get_end(self):
        return self._end

    def show(self):
        # Pretty-print every instruction, indented (Python 2 print statement).
        for i in self._ins:
            print "\t\t",
            i.show(0)
# Maps each Dalvik field-access opcode name to its direction:
# "R" for reads (*get) and "W" for writes (*put).
DVM_FIELDS_ACCESS = {
    "iget" : "R",
    "iget-wide" : "R",
    "iget-object" : "R",
    "iget-boolean" : "R",
    "iget-byte" : "R",
    "iget-char" : "R",
    "iget-short" : "R",

    "iput" : "W",
    "iput-wide" : "W",
    "iput-object" : "W",
    "iput-boolean" : "W",
    "iput-byte" : "W",
    "iput-char" : "W",
    "iput-short" : "W",

    "sget" : "R",
    "sget-wide" : "R",
    "sget-object" : "R",
    "sget-boolean" : "R",
    "sget-byte" : "R",
    "sget-char" : "R",
    "sget-short" : "R",

    "sput" : "W",
    "sput-wide" : "W",
    "sput-object" : "W",
    "sput-boolean" : "W",
    "sput-byte" : "W",
    "sput-char" : "W",
    "sput-short" : "W",
}
class DVMBasicBlock(object):
    """
    A simple basic block of a dalvik method
    """

    def __init__(self, start, vm, method, context):
        self.__vm = vm
        self.method = method
        self.context = context

        self.last_length = 0
        self.nb_instructions = 0

        self.fathers = []          # predecessor blocks
        self.childs = []           # successor blocks

        self.start = start         # byte offset of the first instruction
        self.end = self.start      # grows as instructions are push()ed

        # offset -> payload instruction (switch/fill-array data)
        self.special_ins = {}

        self.name = "%s-BB@0x%x" % (self.method.get_name(), self.start)
        self.exception_analysis = None

        self.tainted_variables = self.context.get_tainted_variables()
        self.tainted_packages = self.context.get_tainted_packages()

        self.notes = []

    def get_notes(self):
        return self.notes

    def set_notes(self, value):
        # Replaces all previous notes with a single one.
        self.notes = [value]

    def add_note(self, note):
        self.notes.append(note)

    def clear_notes(self):
        self.notes = []

    def get_instructions(self):
        """
        Get all instructions from a basic block.

        :rtype: Return all instructions in the current basic block
        """
        # Walks the whole method and keeps instructions whose offset falls
        # inside [start, end).
        tmp_ins = []
        idx = 0
        for i in self.method.get_instructions():
            if idx >= self.start and idx < self.end:
                tmp_ins.append(i)
            idx += i.get_length()
        return tmp_ins

    def get_nb_instructions(self):
        return self.nb_instructions

    def get_method(self):
        return self.method

    def get_name(self):
        return "%s-BB@0x%x" % (self.method.get_name(), self.start)

    def get_start(self):
        return self.start

    def get_end(self):
        return self.end

    def get_last(self):
        """Return the last instruction of this basic block."""
        return self.get_instructions()[-1]

    def get_next(self):
        """
        Get next basic blocks

        :rtype: a list of the next basic blocks
        """
        return self.childs

    def get_prev(self):
        """
        Get previous basic blocks

        :rtype: a list of the previous basic blocks
        """
        return self.fathers

    def set_fathers(self, f):
        self.fathers.append(f)

    def get_last_length(self):
        return self.last_length

    def set_childs(self, values):
        """Register successor blocks; an empty *values* means fall-through."""
        #print self, self.start, self.end, values
        if values == []:
            next_block = self.context.get_basic_block( self.end + 1 )
            if next_block != None:
                self.childs.append( ( self.end - self.get_last_length(), self.end, next_block ) )
        else:
            for i in values:
                if i != -1:
                    next_block = self.context.get_basic_block( i )
                    if next_block != None:
                        self.childs.append( ( self.end - self.get_last_length(), i, next_block) )

        # Mirror each child edge as a father edge on the target block.
        for c in self.childs:
            if c[2] != None:
                c[2].set_fathers( ( c[1], c[0], self ) )

    def push(self, i):
        """Append instruction *i*, feeding the taint trackers by opcode kind."""
        try:
            self.nb_instructions += 1
            idx = self.end
            self.last_length = i.get_length()
            self.end += self.last_length

            op_value = i.get_op_value()

            # field access (iget*/iput*/sget*/sput* opcode range)
            if (op_value >= 0x52 and op_value <= 0x6d):
                desc = self.__vm.get_cm_field(i.get_ref_kind())
                if self.tainted_variables != None:
                    self.tainted_variables.push_info(TAINTED_FIELD, desc, DVM_FIELDS_ACCESS[i.get_name()][0], idx, self.method)

            # invoke (plus the /range variants)
            elif (op_value >= 0x6e and op_value <= 0x72) or (op_value >= 0x74 and op_value <= 0x78):
                idx_meth = i.get_ref_kind()
                method_info = self.__vm.get_cm_method(idx_meth)
                if self.tainted_packages != None:
                    self.tainted_packages.push_info(method_info[0], TAINTED_PACKAGE_CALL, idx, self.method, idx_meth)

            # new_instance
            elif op_value == 0x22:
                idx_type = i.get_ref_kind()
                type_info = self.__vm.get_cm_type(idx_type)
                if self.tainted_packages != None:
                    self.tainted_packages.push_info(type_info, TAINTED_PACKAGE_CREATE, idx, self.method, None)

            # const-string (and the jumbo variant)
            elif (op_value >= 0x1a and op_value <= 0x1b):
                string_name = self.__vm.get_cm_string(i.get_ref_kind())
                if self.tainted_variables != None:
                    self.tainted_variables.push_info(TAINTED_STRING, string_name, "R", idx, self.method)

            # fill-array-data / packed-switch / sparse-switch: remember the
            # payload instruction the branch offset points at.
            elif op_value == 0x26 or (op_value >= 0x2b and op_value <= 0x2c):
                code = self.method.get_code().get_bc()
                self.special_ins[idx] = code.get_ins_off(idx + i.get_ref_off() * 2)
        except:
            # NOTE(review): bare except silently swallows any analysis failure
            # for this instruction -- kept as-is for compatibility.
            pass

    def get_special_ins(self, idx):
        """
        Return the associated instruction to a specific instruction (for example a packed/sparse switch)

        :param idx: the index of the instruction

        :rtype: None or an Instruction
        """
        try:
            return self.special_ins[idx]
        except:
            return None

    def get_exception_analysis(self):
        return self.exception_analysis

    def set_exception_analysis(self, exception_analysis):
        self.exception_analysis = exception_analysis
# Kinds of tainted variables tracked by TaintedVariables.
TAINTED_LOCAL_VARIABLE = 0
TAINTED_FIELD = 1
TAINTED_STRING = 2
class PathVar(object):
    """One access path of a tainted variable.

    Records the access flag, the bytecode offset, a destination method
    index and the tainted-variable object being accessed.
    """

    def __init__(self, access, idx, dst_idx, info_obj):
        self.access_flag = access
        self.idx = idx
        self.dst_idx = dst_idx
        self.info_obj = info_obj

    def get_access_flag(self):
        return self.access_flag

    def get_idx(self):
        return self.idx

    def get_var_info(self):
        # Delegates to the tainted variable itself.
        return self.info_obj.get_info()

    def get_src(self, cm):
        """Resolve the source method triple via the class manager *cm*."""
        ref = cm.get_method_ref( self.idx )
        return ref.get_class_name(), ref.get_name(), ref.get_descriptor()

    def get_dst(self, cm):
        """Resolve the destination method triple via the class manager *cm*."""
        ref = cm.get_method_ref( self.dst_idx )
        return ref.get_class_name(), ref.get_name(), ref.get_descriptor()
class TaintedVariable(object):
    """A tainted field/string/local plus every (access, offset) path touching it."""

    def __init__(self, var, _type):
        self.var = var
        self.type = _type

        # method_idx -> list of (access_flag, instruction_offset)
        self.paths = {}
        self.__cache = []

    def get_type(self):
        return self.type

    def get_info(self):
        # Field vars are stored as [class_name, name, descriptor]; reorder to
        # [class_name, descriptor, name] for display.
        if self.type == TAINTED_FIELD:
            return [ self.var[0], self.var[2], self.var[1] ]
        return self.var

    def push(self, access, idx, ref):
        """Record an access of kind *access* at offset *idx* inside method *ref*."""
        m_idx = ref.get_method_idx()

        if m_idx not in self.paths:
            self.paths[ m_idx ] = []

        self.paths[ m_idx ].append( (access, idx) )

    def get_paths_access(self, mode):
        # NOTE(review): `self.paths[ i ][ j ]` indexes a list with a tuple and
        # would raise TypeError if this generator were ever consumed -- looks
        # dead/broken; confirm before relying on it.
        for i in self.paths:
            for j in self.paths[ i ]:
                for k, v in self.paths[ i ][ j ]:
                    if k in mode:
                        yield i, j, k, v

    def get_paths(self):
        """Return (memoized) flat [(access, idx), method_idx] pairs."""
        if self.__cache != []:
            return self.__cache

        for i in self.paths:
            for j in self.paths[ i ]:
                self.__cache.append( [j, i] )
        return self.__cache

    def get_paths_length(self):
        # Number of distinct methods touching this variable, not total paths.
        return len(self.paths)

    def show_paths(self, vm):
        show_PathVariable( vm, self.get_paths() )
class TaintedVariables(object):
    """Registry of every tainted local variable, field and string in a dex."""

    def __init__(self, _vm):
        self.__vm = _vm
        self.__vars = {
            TAINTED_LOCAL_VARIABLE : {},
            TAINTED_FIELD : {},
            TAINTED_STRING : {},
        }

        # method_idx -> set of TaintedVariable, for fast per-method queries
        self.__cache_field_by_method = {}
        self.__cache_string_by_method = {}

        self.AOSP_PERMISSIONS_MODULE = load_api_specific_resource_module("aosp_permissions", self.__vm.get_api_version())
        self.API_PERMISSION_MAPPINGS_MODULE = load_api_specific_resource_module("api_permission_mappings", self.__vm.get_api_version())

    # functions to get particulars elements
    def get_string(self, s):
        """Return the TaintedVariable for string *s*, or None."""
        try:
            return self.__vars[ TAINTED_STRING ][ s ]
        except KeyError:
            return None

    def get_field(self, class_name, name, descriptor):
        """Return the TaintedVariable for a field, or None."""
        # Field keys are built as class_name + descriptor + name.
        key = class_name + descriptor + name
        try:
            return self.__vars[ TAINTED_FIELD ] [ key ]
        except KeyError:
            return None

    def toPathVariable(self, obj):
        """Expand a TaintedVariable into a list of PathVar objects."""
        z = []
        for i in obj.get_paths():
            access, idx = i[0]
            m_idx = i[1]

            z.append( PathVar(access, idx, m_idx, obj ) )
        return z

    # permission functions
    def get_permissions_method(self, method):
        """Return the AOSP permissions implied by the fields *method* accesses."""
        permissions = set()

        # NOTE(review): compares against method.get_idx() while path entries
        # are keyed by get_method_idx() elsewhere -- confirm both are the
        # same index space.
        for f, f1 in self.get_fields():
            data = "%s-%s-%s" % (f.var[0], f.var[2], f.var[1])
            if data in self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_FIELDS"].keys():
                for path in f.get_paths():
                    m_idx = path[1]
                    if m_idx == method.get_idx():
                        permissions.update(self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_FIELDS"][data])

        return permissions

    def get_permissions(self, permissions_needed):
        """
        @param permissions_needed : a list of restricted permissions to get ([] returns all permissions)
        @rtype : a dictionnary of permissions' paths
        """
        permissions = {}

        pn = set(permissions_needed)
        # An empty request means "all known AOSP permissions".
        if permissions_needed == []:
            pn = set(self.AOSP_PERMISSIONS_MODULE["AOSP_PERMISSIONS"].keys())

        for f, _ in self.get_fields():
            data = "%s-%s-%s" % (f.var[0], f.var[2], f.var[1])

            if data in self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_FIELDS"].keys():
                perm_intersection = pn.intersection(self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_FIELDS"][data])
                for p in perm_intersection:
                    try:
                        permissions[p].extend(self.toPathVariable(f))
                    except KeyError:
                        permissions[p] = []
                        permissions[p].extend(self.toPathVariable(f))

        return permissions

    # global functions
    def get_strings(self):
        """Yield (TaintedVariable, string) pairs for every tainted string."""
        for i in self.__vars[ TAINTED_STRING ]:
            yield self.__vars[ TAINTED_STRING ][ i ], i

    def get_fields(self):
        """Yield (TaintedVariable, key) pairs for every tainted field."""
        for i in self.__vars[ TAINTED_FIELD ]:
            yield self.__vars[ TAINTED_FIELD ][ i ], i

    # specifics functions
    def get_strings_by_method(self, method):
        """Map each tainted string used by *method* to its (access, idx) list."""
        z = {}

        try:
            for i in self.__cache_string_by_method[ method.get_method_idx() ]:
                z[ i ] = []
                for j in i.get_paths():
                    if method.get_method_idx() == j[1]:
                        z[i].append( j[0] )

            return z
        except:
            # No cache entry for this method (or lookup failed): empty result.
            return z

    def get_fields_by_method(self, method):
        """Map each tainted field used by *method* to its (access, idx) list."""
        z = {}

        try:
            for i in self.__cache_field_by_method[ method.get_method_idx() ]:
                z[ i ] = []
                for j in i.get_paths():
                    if method.get_method_idx() == j[1]:
                        z[i].append( j[0] )
            return z
        except:
            return z

    def add(self, var, _type, _method=None):
        """Register a variable of the given taint type (idempotent)."""
        if _type == TAINTED_FIELD:
            key = var[0] + var[1] + var[2]
            if key not in self.__vars[ TAINTED_FIELD ]:
                self.__vars[ TAINTED_FIELD ][ key ] = TaintedVariable( var, _type )
        elif _type == TAINTED_STRING:
            if var not in self.__vars[ TAINTED_STRING ]:
                self.__vars[ TAINTED_STRING ][ var ] = TaintedVariable( var, _type )
        elif _type == TAINTED_LOCAL_VARIABLE:
            # Local variables are additionally keyed by their owning method.
            if _method not in self.__vars[ TAINTED_LOCAL_VARIABLE ]:
                self.__vars[ TAINTED_LOCAL_VARIABLE ][ _method ] = {}

            if var not in self.__vars[ TAINTED_LOCAL_VARIABLE ][ _method ]:
                self.__vars[ TAINTED_LOCAL_VARIABLE ][ _method ][ var ] = TaintedVariable( var, _type )

    def push_info(self, _type, var, access, idx, ref):
        """Record one access of *var* at offset *idx* inside method *ref*.

        Also maintains the per-method caches used by the *_by_method getters.
        """
        if _type == TAINTED_FIELD:
            self.add( var, _type )

            key = var[0] + var[1] + var[2]
            self.__vars[ _type ][ key ].push( access, idx, ref )

            method_idx = ref.get_method_idx()
            if method_idx not in self.__cache_field_by_method:
                self.__cache_field_by_method[ method_idx ] = set()

            self.__cache_field_by_method[ method_idx ].add( self.__vars[ TAINTED_FIELD ][ key ] )

        elif _type == TAINTED_STRING:
            self.add( var, _type )

            self.__vars[ _type ][ var ].push( access, idx, ref )

            method_idx = ref.get_method_idx()

            if method_idx not in self.__cache_string_by_method:
                self.__cache_string_by_method[ method_idx ] = set()

            self.__cache_string_by_method[ method_idx ].add( self.__vars[ TAINTED_STRING ][ var ] )
# Kinds of package interaction: object creation vs. method call.
TAINTED_PACKAGE_CREATE = 0
TAINTED_PACKAGE_CALL = 1

# Single-letter labels used when rendering package paths.
TAINTED_PACKAGE = {
    TAINTED_PACKAGE_CREATE : "C",
    TAINTED_PACKAGE_CALL : "M"
}
def show_Path(vm, path):
    """Print one path (PathVar or PathP) in a human-readable form (Python 2)."""
    cm = vm.get_class_manager()

    if isinstance(path, PathVar):
        # Variable access: "<flag> <var> (0xidx) ---> class->method desc"
        dst_class_name, dst_method_name, dst_descriptor = path.get_dst( cm )
        info_var = path.get_var_info()
        print "%s %s (0x%x) ---> %s->%s%s" % (path.get_access_flag(),
                                              info_var,
                                              path.get_idx(),
                                              dst_class_name,
                                              dst_method_name,
                                              dst_descriptor)
    else:
        if path.get_access_flag() == TAINTED_PACKAGE_CALL:
            # Call path: show both source and destination methods.
            src_class_name, src_method_name, src_descriptor = path.get_src( cm )
            dst_class_name, dst_method_name, dst_descriptor = path.get_dst( cm )

            print "%d %s->%s%s (0x%x) ---> %s->%s%s" % (path.get_access_flag(),
                                                        src_class_name,
                                                        src_method_name,
                                                        src_descriptor,
                                                        path.get_idx(),
                                                        dst_class_name,
                                                        dst_method_name,
                                                        dst_descriptor)
        else:
            # Creation path: only the source method is relevant.
            src_class_name, src_method_name, src_descriptor = path.get_src( cm )
            print "%d %s->%s%s (0x%x)" % (path.get_access_flag(),
                                          src_class_name,
                                          src_method_name,
                                          src_descriptor,
                                          path.get_idx())
def get_Path(vm, path):
    """Serialize a path object into a {"src", "dst", "idx"} dict.

    :param vm: the object which represents the dex file
    :param path: a PathVar or PathP instance
    """
    cm = vm.get_class_manager()
    entry = {}

    if isinstance(path, PathVar):
        class_name, method_name, descriptor = path.get_dst(cm)
        entry["src"] = "%s" % path.get_var_info()
        entry["dst"] = "%s %s %s" % (class_name, method_name, descriptor)
    else:
        entry["src"] = "%s %s %s" % path.get_src(cm)
        # Only call paths have a destination method.
        if path.get_access_flag() == TAINTED_PACKAGE_CALL:
            entry["dst"] = "%s %s %s" % path.get_dst(cm)

    entry["idx"] = path.get_idx()
    return entry
def show_Paths(vm, paths):
    """
    Show paths of packages

    :param vm: the object which represents the dex file
    :param paths: a list of :class:`PathP` objects
    """
    # Delegates the formatting of every entry to show_Path.
    for path in paths:
        show_Path( vm, path )
def get_Paths(vm, paths):
    """
    Return paths of packages

    :param vm: the object which represents the dex file
    :param paths: a list of :class:`PathP` objects
    """
    return [get_Path(vm, path) for path in paths]
def show_PathVariable(vm, paths):
    """Print each [(access, idx), method_idx] path of a tainted variable."""
    for path in paths:
        access, idx = path[0]
        m_idx = path[1]
        # get_cm_method returns (class_name, name, (proto_return, proto_params)).
        method = vm.get_cm_method(m_idx)
        print "%s %x %s->%s %s" % (access, idx, method[0], method[1], method[2][0] + method[2][1])
class PathP(object):
    """A package path: an access (call/create) linking two method indexes."""

    def __init__(self, access, idx, src_idx, dst_idx):
        self.access_flag = access
        self.idx = idx
        self.src_idx = src_idx
        self.dst_idx = dst_idx

    def get_access_flag(self):
        return self.access_flag

    def get_idx(self):
        return self.idx

    def get_src_idx(self):
        return self.src_idx

    def get_dst_idx(self):
        return self.dst_idx

    def get_src(self, cm):
        """Resolve the source method triple through the class manager *cm*."""
        ref = cm.get_method_ref(self.src_idx)
        return ref.get_class_name(), ref.get_name(), ref.get_descriptor()

    def get_dst(self, cm):
        """Resolve the destination method triple through the class manager *cm*."""
        ref = cm.get_method_ref(self.dst_idx)
        return ref.get_class_name(), ref.get_name(), ref.get_descriptor()
class TaintedPackage(object):
def __init__(self, vm, name):
self.vm = vm
self.name = name
self.paths = {TAINTED_PACKAGE_CREATE : [], TAINTED_PACKAGE_CALL : []}
def get_name(self):
return self.name
def gets(self):
return self.paths
def push(self, access, idx, src_idx, dst_idx):
p = PathP( access, idx, src_idx, dst_idx )
self.paths[ access ].append( p )
return p
def get_objects_paths(self):
return self.paths[ TAINTED_PACKAGE_CREATE ]
def search_method(self, name, descriptor):
"""
@param name : a regexp for the name of the method
@param descriptor : a regexp for the descriptor of the method
@rtype : a list of called paths
"""
l = []
m_name = re.compile(name)
m_descriptor = re.compile(descriptor)
for path in self.paths[ TAINTED_PACKAGE_CALL ]:
_, dst_name, dst_descriptor = path.get_dst(self.vm.get_class_manager())
if m_name.match( dst_name ) != None and m_descriptor.match( dst_descriptor ) != None:
l.append( path )
return l
def get_method(self, name, descriptor):
l = []
for path in self.paths[ TAINTED_PACKAGE_CALL ]:
if path.get_name() == name and path.get_descriptor() == descriptor:
l.append( path )
return l
def get_paths(self):
for i in self.paths:
for j in self.paths[ i ]:
yield j
def get_paths_length(self):
x = 0
for i in self.paths:
x += len(self.paths[ i ])
return x
def get_methods(self):
return [path for path in self.paths[TAINTED_PACKAGE_CALL]]
def get_new(self):
return [path for path in self.paths[TAINTED_PACKAGE_CREATE]]
def show(self):
cm = self.vm.get_class_manager()
print self.get_name()
for _type in self.paths:
print "\t -->", _type
if _type == TAINTED_PACKAGE_CALL:
for path in self.paths[_type]:
print "\t\t => %s <-- %x in %s" % (path.get_dst(cm), path.get_idx(), path.get_src(cm))
else:
for path in self.paths[_type]:
print "\t\t => %x in %s" % (path.get_idx(), path.get_src(cm))
def show_Permissions(dx):
    """
    Show where permissions are used in a specific application

    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object
    """
    # [] means "all permissions"; result maps permission -> list of paths.
    p = dx.get_permissions( [] )

    for i in p:
        print i, ":"
        for j in p[i]:
            show_Path( dx.get_vm(), j )
def show_DynCode(dx):
    """
    Show where dynamic code is used

    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object
    """
    # Every known dex/class loading entry point, in the original search order.
    loaders = (
        ("Ldalvik/system/BaseDexClassLoader;", "<init>"),
        ("Ldalvik/system/PathClassLoader;", "<init>"),
        ("Ldalvik/system/DexClassLoader;", "<init>"),
        ("Ldalvik/system/DexFile;", "<init>"),
        ("Ldalvik/system/DexFile;", "loadDex"),
    )

    paths = []
    for class_name, method_name in loaders:
        paths.extend(dx.get_tainted_packages().search_methods(class_name,
                                                              method_name,
                                                              "."))
    show_Paths( dx.get_vm(), paths )
def show_NativeMethods(dx):
    """
    Show the native methods

    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object
    """
    # Prints the list of (class, name, descriptor) triples.
    print get_NativeMethods(dx)
def show_ReflectionCode(dx):
    """
    Show the reflection code

    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object
    """
    # Any call into java.lang.reflect.Method counts as reflection use here.
    paths = dx.get_tainted_packages().search_methods("Ljava/lang/reflect/Method;", ".", ".")
    show_Paths(dx.get_vm(), paths)
def get_NativeMethods(dx):
    """
    Return the native methods

    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object

    :rtype: [tuple]
    """
    ACC_NATIVE = 0x100  # access-flag bit tested by the original code
    vm = dx.get_vm()
    return [(method.get_class_name(), method.get_name(), method.get_descriptor())
            for method in vm.get_methods()
            if method.get_access_flags() & ACC_NATIVE]
def get_ReflectionCode(dx):
    """
    Return the reflection code

    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object

    :rtype: [dict]
    """
    reflection_paths = dx.get_tainted_packages().search_methods(
        "Ljava/lang/reflect/Method;", ".", ".")
    return get_Paths(dx.get_vm(), reflection_paths)
def is_crypto_code(dx):
    """
    Crypto code is present ?

    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object

    :rtype: boolean
    """
    # Calling into either package family counts as crypto usage.
    for class_name in ("Ljavax/crypto/.", "Ljava/security/spec/."):
        if dx.get_tainted_packages().search_methods(class_name, ".", "."):
            return True
    return False
def is_dyn_code(dx):
    """
    Dalvik Dynamic code loading is present ?

    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object

    :rtype: boolean
    """
    # Any of these loader entry points implies dynamic code loading.
    loaders = (
        ("Ldalvik/system/BaseDexClassLoader;", "<init>"),
        ("Ldalvik/system/PathClassLoader;", "<init>"),
        ("Ldalvik/system/DexClassLoader;", "<init>"),
        ("Ldalvik/system/DexFile;", "<init>"),
        ("Ldalvik/system/DexFile;", "loadDex"),
    )
    for class_name, method_name in loaders:
        if dx.get_tainted_packages().search_methods(class_name, method_name, "."):
            return True
    return False
def is_reflection_code(dx):
    """
    Reflection is present ?

    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object

    :rtype: boolean
    """
    # Method/Field access or Class.forName all count as reflection.
    probes = (
        ("Ljava/lang/reflect/Method;", "."),
        ("Ljava/lang/reflect/Field;", "."),
        ("Ljava/lang/Class;", "forName"),
    )
    for class_name, method_name in probes:
        if dx.get_tainted_packages().search_methods(class_name, method_name, "."):
            return True
    return False
def is_native_code(dx):
    """
    Native code is present ?

    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object

    :rtype: boolean
    """
    # System.load*/Runtime.load* are the JNI library loading entry points.
    for class_name in ("Ljava/lang/System;", "Ljava/lang/Runtime;"):
        if dx.get_tainted_packages().search_methods(class_name, "load.", "."):
            return True
    return False
class TaintedPackages(object):
    """Registry of every external package touched by the analyzed dex."""

    def __init__(self, _vm):
        self.__vm = _vm
        self.__packages = {}       # class_name -> TaintedPackage
        self.__methods = {}        # method -> {class_name: [PathP]}

        self.AOSP_PERMISSIONS_MODULE = load_api_specific_resource_module("aosp_permissions", self.__vm.get_api_version())
        self.API_PERMISSION_MAPPINGS_MODULE = load_api_specific_resource_module("api_permission_mappings", self.__vm.get_api_version())

    def _add_pkg(self, name):
        # Lazily create the per-class TaintedPackage.
        if name not in self.__packages:
            self.__packages[ name ] = TaintedPackage( self.__vm, name )

    def push_info(self, class_name, access, idx, method, idx_method):
        """Record that *method* creates/calls *class_name* at offset *idx*."""
        self._add_pkg( class_name )
        p = self.__packages[ class_name ].push( access, idx, method.get_method_idx(), idx_method )

        try:
            self.__methods[ method ][ class_name ].append( p )
        except:
            # Create the missing nested dict/list level(s) on first access.
            try:
                self.__methods[ method ][ class_name ] = []
            except:
                self.__methods[ method ] = {}
                self.__methods[ method ][ class_name ] = []

            self.__methods[ method ][ class_name ].append( p )

    def get_packages_by_method(self, method):
        """Return {class_name: [PathP]} for *method*, or {} if unknown."""
        try:
            return self.__methods[method]
        except KeyError:
            return {}

    def get_package(self, name):
        # Raises KeyError for unknown packages.
        return self.__packages[name]

    def get_packages_by_bb(self, bb):
        """
        :rtype: return a list of packaged used in a basic block
        """
        # NOTE(review): PathP objects expose no get_bb()/get_method() in this
        # file -- this method would raise AttributeError if called; confirm
        # against the rest of androguard before relying on it.
        l = []
        for i in self.__packages:
            paths = self.__packages[i].gets()
            for j in paths:
                for k in paths[j]:
                    if k.get_bb() == bb:
                        l.append( (i, k.get_access_flag(), k.get_idx(), k.get_method()) )

        return l

    def get_packages(self):
        """Yield (TaintedPackage, class_name) pairs."""
        for i in self.__packages:
            yield self.__packages[i], i

    def get_internal_packages_from_package(self, package):
        """Return call paths from *package* into classes defined in the dex."""
        classes = self.__vm.get_classes_names()
        l = []
        for m, _ in self.get_packages():
            paths = m.get_methods()
            for j in paths:
                src_class_name, _, _ = j.get_src(self.__vm.get_class_manager())
                dst_class_name, _, _ = j.get_dst(self.__vm.get_class_manager())

                if src_class_name == package and dst_class_name in classes:
                    l.append(j)
        return l

    def get_internal_packages(self):
        """
        :rtype: return a list of the internal packages called in the application
        """
        classes = self.__vm.get_classes_names()
        l = []
        for m, _ in self.get_packages():
            paths = m.get_methods()
            for j in paths:
                if j.get_access_flag() == TAINTED_PACKAGE_CALL:
                    dst_class_name, _, _ = j.get_dst(self.__vm.get_class_manager())
                    if dst_class_name in classes and m.get_name() in classes:
                        l.append(j)
        return l

    def get_internal_new_packages(self):
        """
        :rtype: return a list of the internal packages created in the application
        """
        classes = self.__vm.get_classes_names()
        l = {}
        for m, _ in self.get_packages():
            paths = m.get_new()
            for j in paths:
                src_class_name, _, _ = j.get_src(self.__vm.get_class_manager())
                if src_class_name in classes and m.get_name() in classes:
                    if j.get_access_flag() == TAINTED_PACKAGE_CREATE:
                        try:
                            l[m.get_name()].append(j)
                        except:
                            l[m.get_name()] = []
                            l[m.get_name()].append(j)
        return l

    def get_external_packages(self):
        """
        :rtype: return a list of the external packages called in the application
        """
        classes = self.__vm.get_classes_names()
        l = []
        for m, _ in self.get_packages():
            paths = m.get_methods()
            for j in paths:
                src_class_name, _, _ = j.get_src(self.__vm.get_class_manager())
                dst_class_name, _, _ = j.get_dst(self.__vm.get_class_manager())
                # Caller inside the dex, callee outside of it.
                if src_class_name in classes and dst_class_name not in classes:
                    if j.get_access_flag() == TAINTED_PACKAGE_CALL:
                        l.append(j)
        return l

    def search_packages(self, package_name):
        """
        :param package_name: a regexp for the name of the package

        :rtype: a list of called packages' paths
        """
        ex = re.compile(package_name)

        l = []
        for m, _ in self.get_packages():
            if ex.search(m.get_name()) != None:
                l.extend(m.get_methods())
        return l

    def search_unique_packages(self, package_name):
        """
        :param package_name: a regexp for the name of the package
        """
        # NOTE(review): TaintedPackage has no get_info() and PathP has no
        # get_class_name()/get_name()/get_descriptor() in this file -- this
        # method looks broken as written; confirm before use.
        ex = re.compile( package_name )

        l = []
        d = {}
        for m, _ in self.get_packages():
            if ex.match( m.get_info() ) != None:
                for path in m.get_methods():
                    try:
                        d[ path.get_class_name() + path.get_name() + path.get_descriptor() ] += 1
                    except KeyError:
                        d[ path.get_class_name() + path.get_name() + path.get_descriptor() ] = 0
                        l.append( [ path.get_class_name(), path.get_name(), path.get_descriptor() ] )
        return l, d

    def search_methods(self, class_name, name, descriptor, re_expr=True):
        """
        @param class_name : a regexp for the class name of the method (the package)
        @param name : a regexp for the name of the method
        @param descriptor : a regexp for the descriptor of the method

        @rtype : a list of called methods' paths
        """
        l = []
        if re_expr == True:
            ex = re.compile( class_name )

            for m, _ in self.get_packages():
                if ex.search( m.get_name() ) != None:
                    l.extend( m.search_method( name, descriptor ) )

        return l

    def search_objects(self, class_name):
        """
        @param class_name : a regexp for the class name

        @rtype : a list of created objects' paths
        """
        ex = re.compile( class_name )
        l = []
        for m, _ in self.get_packages():
            if ex.search( m.get_name() ) != None:
                l.extend( m.get_objects_paths() )
        return l

    def search_crypto_packages(self):
        """
        @rtype : a list of called crypto packages
        """
        return self.search_packages( "Ljavax/crypto/" )

    def search_telephony_packages(self):
        """
        @rtype : a list of called telephony packages
        """
        return self.search_packages( "Landroid/telephony/" )

    def search_net_packages(self):
        """
        @rtype : a list of called net packages
        """
        return self.search_packages( "Landroid/net/" )

    def get_method(self, class_name, name, descriptor):
        """Return exact-match call paths of a method, or [] for unknown classes."""
        try:
            return self.__packages[ class_name ].get_method( name, descriptor )
        except KeyError:
            return []

    def get_permissions_method(self, method):
        """Return the set of AOSP permissions used by calls made in *method*."""
        # NOTE(review): relies on PathP.get_method(), which is not defined in
        # this file -- confirm against the rest of androguard.
        permissions = set()

        for m, _ in self.get_packages():
            paths = m.get_methods()
            for j in paths:
                if j.get_method() == method:
                    if j.get_access_flag() == TAINTED_PACKAGE_CALL:
                        dst_class_name, dst_method_name, dst_descriptor = j.get_dst( self.__vm.get_class_manager() )
                        data = "%s-%s-%s" % (dst_class_name, dst_method_name, dst_descriptor)
                        if data in self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_METHODS"].keys():
                            permissions.update(self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_METHODS"][data])

        return permissions

    def get_permissions(self, permissions_needed):
        """
        @param permissions_needed : a list of restricted permissions to get ([] returns all permissions)

        @rtype : a dictionnary of permissions' paths
        """
        permissions = {}

        pn = set(permissions_needed)
        # An empty request means "all known AOSP permissions".
        if permissions_needed == []:
            pn = set(self.AOSP_PERMISSIONS_MODULE["AOSP_PERMISSIONS"].keys())

        classes = self.__vm.get_classes_names()

        for m, _ in self.get_packages():
            paths = m.get_methods()
            for j in paths:
                src_class_name, src_method_name, src_descriptor = j.get_src( self.__vm.get_class_manager() )
                dst_class_name, dst_method_name, dst_descriptor = j.get_dst( self.__vm.get_class_manager() )
                # Only external calls originating from the application itself.
                if (src_class_name in classes) and (dst_class_name not in classes):
                    if j.get_access_flag() == TAINTED_PACKAGE_CALL:
                        data = "%s-%s-%s" % (dst_class_name, dst_method_name, dst_descriptor)
                        if data in self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_METHODS"].keys():
                            perm_intersection = pn.intersection(self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_METHODS"][data])
                            for p in perm_intersection:
                                try:
                                    permissions[p].append(j)
                                except KeyError:
                                    permissions[p] = []
                                    permissions[p].append(j)

        return permissions
class Enum(object):
    """Poor-man's enum: exposes each name (uppercased) as a sequential int."""

    def __init__(self, names):
        self.names = names
        for value, name in enumerate(names):
            setattr(self, name.upper(), value)

    def tuples(self):
        """Return ((0, name0), (1, name1), ...)."""
        return tuple(enumerate(self.names))
# Tag values for the areas of the Android/Java API that a method can touch
# (consumed by Tags and MethodAnalysis.create_tags).
TAG_ANDROID = Enum([ 'ANDROID', 'TELEPHONY', 'SMS', 'SMSMESSAGE', 'ACCESSIBILITYSERVICE', 'ACCOUNTS',
    'ANIMATION', 'APP', 'BLUETOOTH', 'CONTENT', 'DATABASE', 'DEBUG', 'DRM', 'GESTURE',
    'GRAPHICS', 'HARDWARE', 'INPUTMETHODSERVICE', 'LOCATION', 'MEDIA', 'MTP',
    'NET', 'NFC', 'OPENGL', 'OS', 'PREFERENCE', 'PROVIDER', 'RENDERSCRIPT',
    'SAX', 'SECURITY', 'SERVICE', 'SPEECH', 'SUPPORT', 'TEST', 'TEXT', 'UTIL',
    'VIEW', 'WEBKIT', 'WIDGET', 'DALVIK_BYTECODE', 'DALVIK_SYSTEM', 'JAVA_REFLECTION'])

# Reverse mapping: numeric tag value -> tag name.
TAG_REVERSE_ANDROID = dict((i[0], i[1]) for i in TAG_ANDROID.tuples())

# For each tag: [match kind, class-name pattern].  Match kind 0 means the
# pattern is matched against class names; the pattern strings are compiled
# in place by Tags.__init__.
TAGS_ANDROID = { TAG_ANDROID.ANDROID : [ 0, "Landroid" ],
                 TAG_ANDROID.TELEPHONY : [ 0, "Landroid/telephony"],
                 TAG_ANDROID.SMS : [ 0, "Landroid/telephony/SmsManager"],
                 TAG_ANDROID.SMSMESSAGE : [ 0, "Landroid/telephony/SmsMessage"],
                 TAG_ANDROID.DEBUG : [ 0, "Landroid/os/Debug"],
                 TAG_ANDROID.ACCESSIBILITYSERVICE : [ 0, "Landroid/accessibilityservice" ],
                 TAG_ANDROID.ACCOUNTS : [ 0, "Landroid/accounts" ],
                 TAG_ANDROID.ANIMATION : [ 0, "Landroid/animation" ],
                 TAG_ANDROID.APP : [ 0, "Landroid/app" ],
                 TAG_ANDROID.BLUETOOTH : [ 0, "Landroid/bluetooth" ],
                 TAG_ANDROID.CONTENT : [ 0, "Landroid/content" ],
                 TAG_ANDROID.DATABASE : [ 0, "Landroid/database" ],
                 TAG_ANDROID.DRM : [ 0, "Landroid/drm" ],
                 TAG_ANDROID.GESTURE : [ 0, "Landroid/gesture" ],
                 TAG_ANDROID.GRAPHICS : [ 0, "Landroid/graphics" ],
                 TAG_ANDROID.HARDWARE : [ 0, "Landroid/hardware" ],
                 TAG_ANDROID.INPUTMETHODSERVICE : [ 0, "Landroid/inputmethodservice" ],
                 TAG_ANDROID.LOCATION : [ 0, "Landroid/location" ],
                 TAG_ANDROID.MEDIA : [ 0, "Landroid/media" ],
                 TAG_ANDROID.MTP : [ 0, "Landroid/mtp" ],
                 TAG_ANDROID.NET : [ 0, "Landroid/net" ],
                 TAG_ANDROID.NFC : [ 0, "Landroid/nfc" ],
                 TAG_ANDROID.OPENGL : [ 0, "Landroid/opengl" ],
                 TAG_ANDROID.OS : [ 0, "Landroid/os" ],
                 TAG_ANDROID.PREFERENCE : [ 0, "Landroid/preference" ],
                 TAG_ANDROID.PROVIDER : [ 0, "Landroid/provider" ],
                 TAG_ANDROID.RENDERSCRIPT : [ 0, "Landroid/renderscript" ],
                 TAG_ANDROID.SAX : [ 0, "Landroid/sax" ],
                 TAG_ANDROID.SECURITY : [ 0, "Landroid/security" ],
                 TAG_ANDROID.SERVICE : [ 0, "Landroid/service" ],
                 TAG_ANDROID.SPEECH : [ 0, "Landroid/speech" ],
                 TAG_ANDROID.SUPPORT : [ 0, "Landroid/support" ],
                 TAG_ANDROID.TEST : [ 0, "Landroid/test" ],
                 TAG_ANDROID.TEXT : [ 0, "Landroid/text" ],
                 TAG_ANDROID.UTIL : [ 0, "Landroid/util" ],
                 TAG_ANDROID.VIEW : [ 0, "Landroid/view" ],
                 TAG_ANDROID.WEBKIT : [ 0, "Landroid/webkit" ],
                 TAG_ANDROID.WIDGET : [ 0, "Landroid/widget" ],
                 TAG_ANDROID.DALVIK_BYTECODE : [ 0, "Ldalvik/bytecode" ],
                 TAG_ANDROID.DALVIK_SYSTEM : [ 0, "Ldalvik/system" ],
                 TAG_ANDROID.JAVA_REFLECTION : [ 0, "Ljava/lang/reflect"],
}
class Tags(object):
    """
    Handle specific tags

    :param patterns: dict mapping tag value -> [match kind, class-name pattern]
    :param reverse: dict mapping tag value -> human readable tag name
    """
    def __init__(self, patterns=TAGS_ANDROID, reverse=TAG_REVERSE_ANDROID):
        self.tags = set()

        self.patterns = patterns
        # BUGFIX: the reverse parameter was previously ignored
        # (self.reverse was always set to TAG_REVERSE_ANDROID).
        self.reverse = reverse

        # NOTE: mutates the (possibly shared) patterns dict in place;
        # re.compile accepts an already-compiled pattern, so repeated
        # instantiation with the same dict is safe.
        for i in self.patterns:
            self.patterns[i][1] = re.compile(self.patterns[i][1])

    def emit(self, method):
        """Add every tag whose pattern matches the method's class name."""
        for i in self.patterns:
            if self.patterns[i][0] == 0:
                if self.patterns[i][1].search(method.get_class()) is not None:
                    self.tags.add(i)

    def emit_by_classname(self, classname):
        """Add every tag whose pattern matches the given class name."""
        for i in self.patterns:
            if self.patterns[i][0] == 0:
                if self.patterns[i][1].search(classname) is not None:
                    self.tags.add(i)

    def get_list(self):
        """Return the human readable names of the collected tags."""
        return [self.reverse[i] for i in self.tags]

    def __contains__(self, key):
        return key in self.tags

    def __str__(self):
        return str([self.reverse[i] for i in self.tags])

    def empty(self):
        """Return True when no tag has been collected yet."""
        return self.tags == set()
class BasicBlocks(object):
    """
    This class represents all basic blocks of a method.

    :param _vm: a :class:`DalvikVMFormat` object
    :param tv: the tainted-information object (may be None)
    """
    def __init__(self, _vm, tv):
        self.__vm = _vm
        self.tainted = tv

        self.bb = []

    def push(self, bb):
        """Append a basic block."""
        self.bb.append(bb)

    def pop(self, idx):
        """Remove and return the basic block at index idx."""
        return self.bb.pop(idx)

    def get_basic_block(self, idx):
        """Return the basic block containing instruction offset idx, or None."""
        for i in self.bb:
            if idx >= i.get_start() and idx < i.get_end():
                return i
        return None

    # BUGFIX (the three accessors below): the bare `except:` clauses also
    # swallowed KeyboardInterrupt/SystemExit; narrowed to AttributeError,
    # which is what a missing/None tainted object raises here.

    def get_tainted_integers(self):
        """Return the tainted integers, or None when no tainted info is attached."""
        try:
            return self.tainted.get_tainted_integers()
        except AttributeError:
            return None

    def get_tainted_packages(self):
        """Return the tainted packages, or None when no tainted info is attached."""
        try:
            return self.tainted.get_tainted_packages()
        except AttributeError:
            return None

    def get_tainted_variables(self):
        """Return the tainted variables, or None when no tainted info is attached."""
        try:
            return self.tainted.get_tainted_variables()
        except AttributeError:
            return None

    def get(self):
        """
        :rtype: return each basic block (:class:`DVMBasicBlock` object)
        """
        for i in self.bb:
            yield i

    def gets(self):
        """
        :rtype: a list of basic blocks (:class:`DVMBasicBlock` objects)
        """
        return self.bb

    def get_basic_block_pos(self, idx):
        """Return the basic block at list position idx."""
        return self.bb[idx]
class ExceptionAnalysis(object):
    """One try/catch region of a method.

    *exception* is [start, end, [name, handler_addr], ...]; each handler
    entry gets the basic block starting at its address appended to it.
    """

    def __init__(self, exception, bb):
        self.start = exception[0]
        self.end = exception[1]

        self.exceptions = exception[2:]
        # Resolve the basic block that begins at each handler address.
        for handler in self.exceptions:
            handler.append(bb.get_basic_block(handler[1]))

    def show_buff(self):
        """Return a human readable dump of the region and its handlers."""
        lines = ["%x:%x" % (self.start, self.end)]
        for name, addr, block in self.exceptions:
            target = block if block is None else block.get_name()
            lines.append("\t(%s -> %x %s)" % (name, addr, target))
        return "\n".join(lines)

    def get(self):
        """Return the region as a plain dict (start, end, handler list)."""
        return {
            "start": self.start,
            "end": self.end,
            "list": [
                {"name": entry[0], "idx": entry[1], "bb": entry[2].get_name()}
                for entry in self.exceptions
            ],
        }
class Exceptions(object):
    """Container for all try/catch regions of a method."""

    def __init__(self, _vm):
        self.__vm = _vm
        self.exceptions = []

    def add(self, exceptions, basic_blocks):
        """Wrap each raw exception entry in an ExceptionAnalysis."""
        self.exceptions.extend(
            ExceptionAnalysis(entry, basic_blocks) for entry in exceptions
        )

    def get_exception(self, addr_start, addr_end):
        """Return the first region overlapping [addr_start, addr_end], or None."""
        for region in self.exceptions:
            # Region fully inside the queried range, or the range fully
            # inside the region.
            if region.start >= addr_start and region.end <= addr_end:
                return region
            if addr_end <= region.end and addr_start >= region.start:
                return region
        return None

    def gets(self):
        """Return the list of all regions."""
        return self.exceptions

    def get(self):
        """Yield each region."""
        for region in self.exceptions:
            yield region
# Configuration used by MethodAnalysis to build the control-flow graph:
#   BasicOPCODES : regexps naming the branch instructions
#   BasicClass   : class instantiated for each basic block
#   Dnext        : computes the successors of a branch instruction
#   Dexception   : extracts the try/catch information of a method
BO = { "BasicOPCODES" : dvm.BRANCH_DVM_OPCODES, "BasicClass" : DVMBasicBlock, "Dnext" : dvm.determineNext, "Dexception" : dvm.determineException }

# Pre-compiled versions of the branch-opcode regexps (compiled once at
# import time, matched once per instruction during analysis).
BO["BasicOPCODES_H"] = []
for i in BO["BasicOPCODES"]:
    BO["BasicOPCODES_H"].append( re.compile( i ) )
class MethodAnalysis(object):
    """
    This class analyses in details a method of a class/dex file

    :param vm: the object which represent the dex file
    :param method: the original method
    :param tv: a virtual object to get access to tainted information

    :type vm: a :class:`DalvikVMFormat` object
    :type method: a :class:`EncodedMethod` object
    """
    def __init__(self, vm, method, tv):
        self.__vm = vm
        self.method = method

        self.tainted = tv

        self.basic_blocks = BasicBlocks(self.__vm, self.tainted)
        self.exceptions = Exceptions(self.__vm)

        # Methods without code (e.g. abstract/native) have nothing to analyse.
        code = self.method.get_code()
        if code == None:
            return

        current_basic = BO["BasicClass"](0, self.__vm, self.method, self.basic_blocks)
        self.basic_blocks.push(current_basic)

        ##########################################################

        bc = code.get_bc()
        # l: every bytecode offset that must start a new basic block
        #    (branch targets, try starts, handler entry points).
        # h: maps the offset of each branch instruction to its successors.
        l = []
        h = {}
        idx = 0

        debug("Parsing instructions")
        instructions = [i for i in bc.get_instructions()]
        for i in instructions:
            for j in BO["BasicOPCODES_H"]:
                if j.match(i.get_name()) != None:
                    v = BO["Dnext"](i, idx, self.method)
                    h[ idx ] = v
                    l.extend(v)
                    break

            idx += i.get_length()

        debug("Parsing exceptions")
        excepts = BO["Dexception"]( self.__vm, self.method )
        for i in excepts:
            # i[0] is the start offset of a try region; every handler entry
            # point (handler[1]) also begins a basic block.
            l.extend( [i[0]] )
            for handler in i[2:]:
                l.append( handler[1] )

        debug("Creating basic blocks in %s" % self.method)
        idx = 0
        for i in instructions:
            # index is a destination
            if idx in l:
                if current_basic.get_nb_instructions() != 0:
                    current_basic = BO["BasicClass"](current_basic.get_end(), self.__vm, self.method, self.basic_blocks)
                    self.basic_blocks.push(current_basic)

            current_basic.push(i)

            # index is a branch instruction
            if idx in h:
                current_basic = BO["BasicClass"]( current_basic.get_end(), self.__vm, self.method, self.basic_blocks )
                self.basic_blocks.push( current_basic )

            idx += i.get_length()

        # Drop the trailing empty block left by a branch at the very end.
        if current_basic.get_nb_instructions() == 0:
            self.basic_blocks.pop(-1)

        debug("Settings basic blocks childs")

        # A block's children are the successors of its last instruction
        # (blocks whose last instruction is not a branch get none here).
        for i in self.basic_blocks.get():
            try:
                i.set_childs( h[ i.end - i.get_last_length() ] )
            except KeyError:
                i.set_childs( [] )

        debug("Creating exceptions")

        # Create exceptions
        self.exceptions.add(excepts, self.basic_blocks)

        for i in self.basic_blocks.get():
            # setup exception by basic block
            i.set_exception_analysis(self.exceptions.get_exception( i.start, i.end - 1 ))

        del instructions
        del h, l

    def get_basic_blocks(self):
        """
        :rtype: a :class:`BasicBlocks` object
        """
        return self.basic_blocks

    def get_length(self):
        """
        :rtype: an integer which is the length of the code

        NOTE(review): this class defines no get_code(); calling this looks
        like it would raise AttributeError -- confirm.
        """
        return self.get_code().get_length()

    def get_vm(self):
        """Return the :class:`DalvikVMFormat` this analysis belongs to."""
        return self.__vm

    def get_method(self):
        """Return the analysed :class:`EncodedMethod`."""
        return self.method

    def get_local_variables(self):
        """Return the tainted local variables of this method."""
        return self.tainted.get_tainted_variables().get_local_variables( self.method )

    def show(self):
        # NOTE: Python 2 print statements (debug dump of all basic blocks).
        print "METHOD", self.method.get_class_name(), self.method.get_name(), self.method.get_descriptor()
        for i in self.basic_blocks.get():
            print "\t", i
            i.show()
            print ""

    def show_methods(self):
        # NOTE(review): self.__bb is never assigned anywhere on this class;
        # this method looks dead/broken -- confirm before relying on it.
        print "\t #METHODS :"
        for i in self.__bb:
            methods = i.get_methods()
            for method in methods:
                print "\t\t-->", method.get_class_name(), method.get_name(), method.get_descriptor()
                for context in methods[method]:
                    print "\t\t\t |---|", context.details

    def create_tags(self):
        """
        Create the tags for the method
        """
        self.tags = Tags()
        for i in self.tainted.get_tainted_packages().get_packages_by_method( self.method ):
            self.tags.emit_by_classname( i )

    def get_tags(self):
        """
        Return the tags of the method

        :rtype: a :class:`Tags` object
        """
        return self.tags
# Names of the predefined signature grammars (see SIGNATURES below and
# VMAnalysis.get_method_signature, which expands them).
SIGNATURE_L0_0 = "L0_0"
SIGNATURE_L0_1 = "L0_1"
SIGNATURE_L0_2 = "L0_2"
SIGNATURE_L0_3 = "L0_3"
SIGNATURE_L0_4 = "L0_4"
SIGNATURE_L0_5 = "L0_5"
SIGNATURE_L0_6 = "L0_6"
SIGNATURE_L0_0_L1 = "L0_0:L1"
SIGNATURE_L0_1_L1 = "L0_1:L1"
SIGNATURE_L0_2_L1 = "L0_2:L1"
SIGNATURE_L0_3_L1 = "L0_3:L1"
SIGNATURE_L0_4_L1 = "L0_4:L1"
SIGNATURE_L0_5_L1 = "L0_5:L1"
SIGNATURE_L0_0_L2 = "L0_0:L2"
SIGNATURE_L0_0_L3 = "L0_0:L3"
SIGNATURE_HEX = "hex"
SIGNATURE_SEQUENCE_BB = "sequencebb"

# Configuration of each predefined signature: "type" selects the grammar
# variant; "arguments" presumably restricts it to the given class-name
# prefixes (verify against sign.Signature).
SIGNATURES = {
                SIGNATURE_L0_0 : { "type" : 0 },
                SIGNATURE_L0_1 : { "type" : 1 },
                SIGNATURE_L0_2 : { "type" : 2, "arguments" : ["Landroid"] },
                SIGNATURE_L0_3 : { "type" : 2, "arguments" : ["Ljava"] },
                SIGNATURE_L0_4 : { "type" : 2, "arguments" : ["Landroid", "Ljava"] },
                SIGNATURE_L0_5 : { "type" : 3, "arguments" : ["Landroid"] },
                SIGNATURE_L0_6 : { "type" : 3, "arguments" : ["Ljava"] },
                SIGNATURE_SEQUENCE_BB : {},
                SIGNATURE_HEX : {},
}
from sign import Signature
class StringAnalysis(object):
    """Collects the methods (xrefs) that reference one string constant."""

    def __init__(self, value):
        self.value = value
        self.xreffrom = set()

    def AddXrefFrom(self, classobj, methodobj):
        """Record that methodobj (in classobj) references this string."""
        self.xreffrom.add((classobj, methodobj))

    def get_xref_from(self):
        """Return the set of (class, method) pairs referencing the string."""
        return self.xreffrom

    def __str__(self):
        parts = ["XREFto for string %s in\n" % repr(self.value)]
        parts.extend(
            "%s:%s\n" % (klass.get_vm_class().get_name(), meth)
            for klass, meth in self.xreffrom
        )
        return "".join(parts)
class MethodClassAnalysis(object):
    """Tracks incoming and outgoing call cross-references for one method."""

    def __init__(self, method):
        self.method = method
        self.xrefto = set()
        self.xreffrom = set()

    def AddXrefTo(self, classobj, methodobj):
        """Record an outgoing call from this method to methodobj."""
        self.xrefto.add((classobj, methodobj))

    def AddXrefFrom(self, classobj, methodobj):
        """Record an incoming call to this method from methodobj."""
        self.xreffrom.add((classobj, methodobj))

    def get_xref_from(self):
        """Return the set of (class, method) callers."""
        return self.xreffrom

    def get_xref_to(self):
        """Return the set of (class, method) callees."""
        return self.xrefto

    def __str__(self):
        chunks = ["XREFto for %s\n" % self.method]
        for klass, meth in self.xrefto:
            chunks.append("in\n")
            chunks.append("%s:%s\n" % (klass.get_vm_class().get_name(), meth))
        chunks.append("XREFFrom for %s\n" % self.method)
        for klass, meth in self.xreffrom:
            chunks.append("in\n")
            chunks.append("%s:%s\n" % (klass.get_vm_class().get_name(), meth))
        return "".join(chunks)
class FieldClassAnalysis(object):
    """Tracks read and write cross-references for one field."""

    def __init__(self, field):
        self.field = field
        self.xrefread = set()
        self.xrefwrite = set()

    def AddXrefRead(self, classobj, methodobj):
        """Record that methodobj (in classobj) reads this field."""
        self.xrefread.add((classobj, methodobj))

    def AddXrefWrite(self, classobj, methodobj):
        """Record that methodobj (in classobj) writes this field."""
        self.xrefwrite.add((classobj, methodobj))

    def get_xref_read(self):
        """Return the set of (class, method) readers."""
        return self.xrefread

    def get_xref_write(self):
        """Return the set of (class, method) writers."""
        return self.xrefwrite

    def __str__(self):
        chunks = ["XREFRead for %s\n" % self.field]
        for klass, meth in self.xrefread:
            chunks.append("in\n")
            chunks.append("%s:%s\n" % (klass.get_vm_class().get_name(), meth))
        chunks.append("XREFWrite for %s\n" % self.field)
        for klass, meth in self.xrefwrite:
            chunks.append("in\n")
            chunks.append("%s:%s\n" % (klass.get_vm_class().get_name(), meth))
        return "".join(chunks)
# Kinds of class-level cross references recorded by ClassAnalysis:
# REF_NEW_INSTANCE: the target class is instantiated (see the op 0x22
# branch in newVMAnalysis.create_xref); REF_CLASS_USAGE: the target class
# is otherwise referenced (class constant or method call).
REF_NEW_INSTANCE = 0
REF_CLASS_USAGE = 1
class ClassAnalysis(object):
    """Aggregates cross-reference information for one class: per-method and
    per-field analyses plus class-level xrefs keyed by the other class."""

    def __init__(self, classobj):
        self._class = classobj
        self._methods = {}
        self._fields = {}

        self.xrefto = collections.defaultdict(set)
        self.xreffrom = collections.defaultdict(set)

    def get_method_analysis(self, method):
        """Return the MethodClassAnalysis for method, or None."""
        return self._methods.get(method)

    def get_field_analysis(self, field):
        """Return the FieldClassAnalysis for field, or None."""
        return self._fields.get(field)

    def AddFXrefRead(self, method, classobj, field):
        """Record a read of field by method (in classobj)."""
        self._fields.setdefault(field, FieldClassAnalysis(field)).AddXrefRead(classobj, method)

    def AddFXrefWrite(self, method, classobj, field):
        """Record a write of field by method (in classobj)."""
        self._fields.setdefault(field, FieldClassAnalysis(field)).AddXrefWrite(classobj, method)

    def AddMXrefTo(self, method1, classobj, method2):
        """Record a call from method1 to method2 (in classobj)."""
        self._methods.setdefault(method1, MethodClassAnalysis(method1)).AddXrefTo(classobj, method2)

    def AddMXrefFrom(self, method1, classobj, method2):
        """Record a call to method1 from method2 (in classobj)."""
        self._methods.setdefault(method1, MethodClassAnalysis(method1)).AddXrefFrom(classobj, method2)

    def AddXrefTo(self, ref_kind, classobj, methodobj):
        """Record a class-level reference from this class to classobj."""
        self.xrefto[classobj].add((ref_kind, methodobj))

    def AddXrefFrom(self, ref_kind, classobj, methodobj):
        """Record a class-level reference to this class from classobj."""
        self.xreffrom[classobj].add((ref_kind, methodobj))

    def get_xref_from(self):
        return self.xreffrom

    def get_xref_to(self):
        return self.xrefto

    def get_vm_class(self):
        return self._class

    def __str__(self):
        def _section(header, table):
            parts = [header]
            for klass in table:
                parts.append(str(klass.get_vm_class().get_name()) + " ")
                parts.append("in\n")
                for kind, meth in table[klass]:
                    parts.append("%d %s\n" % (kind, meth))
                parts.append("\n")
            return parts

        pieces = _section("XREFto for %s\n" % self._class, self.xrefto)
        pieces += _section("XREFFrom for %s\n" % self._class, self.xreffrom)
        return "".join(pieces)
class newVMAnalysis(object):
    """
    Lighter analysis: builds cross references (class, method, field,
    string) for a dex file, without the tainted-* machinery.

    :param vm: a :class:`DalvikVMFormat` object
    """
    def __init__(self, vm):
        self.vm = vm
        self.classes = {}
        self.strings = {}

        # One ClassAnalysis per class defined in the dex.
        for current_class in self.vm.get_classes():
            self.classes[current_class.get_name()] = ClassAnalysis(current_class)

    def create_xref(self):
        """Walk every instruction of every method and record xrefs/drefs."""
        debug("Creating XREF/DREF")

        instances_class_name = self.classes.keys()
        # NOTE(review): external_instances is never used -- confirm leftover.
        external_instances = {}

        for current_class in self.vm.get_classes():
            for current_method in current_class.get_methods():
                debug("Creating XREF for %s" % current_method)

                code = current_method.get_code()
                if code == None:
                    continue

                off = 0
                bc = code.get_bc()
                for instruction in bc.get_instructions():
                    op_value = instruction.get_op_value()
                    # 0x1c / 0x22: presumably const-class / new-instance --
                    # both carry a type reference.
                    if op_value in [0x1c, 0x22]:
                        idx_type = instruction.get_ref_kind()
                        type_info = self.vm.get_cm_type(idx_type)

                        # Internal xref related to class manipulation
                        if type_info in instances_class_name and type_info != current_class.get_name():
                            # new instance
                            if op_value == 0x22:
                                self.classes[current_class.get_name()].AddXrefTo(REF_NEW_INSTANCE, self.classes[type_info], current_method)
                                self.classes[type_info].AddXrefFrom(REF_NEW_INSTANCE, self.classes[current_class.get_name()], current_method)
                            # class reference
                            else:
                                self.classes[current_class.get_name()].AddXrefTo(REF_CLASS_USAGE, self.classes[type_info], current_method)
                                self.classes[type_info].AddXrefFrom(REF_CLASS_USAGE, self.classes[current_class.get_name()], current_method)
                    # 0x6e-0x72 / 0x74-0x78: presumably the invoke-* /
                    # invoke-*-range opcode ranges (method calls).
                    elif ((op_value >= 0x6e and op_value <= 0x72) or
                          (op_value >= 0x74 and op_value <= 0x78)):
                        idx_meth = instruction.get_ref_kind()
                        method_info = self.vm.get_cm_method(idx_meth)
                        if method_info:
                            class_info = method_info[0]

                            method_item = self.vm.get_method_descriptor(method_info[0], method_info[1], ''.join(method_info[2]))
                            if method_item:
                                self.classes[current_class.get_name()].AddMXrefTo(current_method, self.classes[class_info], method_item)
                                self.classes[class_info].AddMXrefFrom(method_item, self.classes[current_class.get_name()], current_method)

                                # Internal xref related to class manipulation
                                if class_info in instances_class_name and class_info != current_class.get_name():
                                    self.classes[current_class.get_name()].AddXrefTo(REF_CLASS_USAGE, self.classes[class_info], method_item)
                                    self.classes[class_info].AddXrefFrom(REF_CLASS_USAGE, self.classes[current_class.get_name()], current_method)
                    # 0x1a-0x1b: presumably const-string (/jumbo) -- carries
                    # a string reference.
                    elif op_value >= 0x1a and op_value <= 0x1b:
                        string_value = self.vm.get_cm_string(instruction.get_ref_kind())
                        if string_value not in self.strings:
                            self.strings[string_value] = StringAnalysis(string_value)
                        self.strings[string_value].AddXrefFrom(self.classes[current_class.get_name()], current_method)
                    # 0x52-0x6d: presumably the field access opcode range
                    # (iget/iput/sget/sput families).
                    elif op_value >= 0x52 and op_value <= 0x6d:
                        idx_field = instruction.get_ref_kind()
                        field_info = self.vm.get_cm_field(idx_field)
                        field_item = self.vm.get_field_descriptor(field_info[0], field_info[2], field_info[1])
                        if field_item:
                            # read access to a field
                            if (op_value >= 0x52 and op_value <= 0x58) or (op_value >= 0x60 and op_value <= 0x66):
                                self.classes[current_class.get_name()].AddFXrefRead(current_method, self.classes[current_class.get_name()], field_item)
                            # write access to a field
                            else:
                                self.classes[current_class.get_name()].AddFXrefWrite(current_method, self.classes[current_class.get_name()], field_item)

                    off += instruction.get_length()

    def get_method(self, method):
        """Return a fresh MethodAnalysis (no tainted info) for method."""
        return MethodAnalysis( self.vm, method, None )

    def get_method_by_name(self, class_name, method_name, method_descriptor):
        """Return the EncodedMethod matching the triple, or None."""
        # NOTE: Python 2 print statements (debug output).
        print class_name, method_name, method_descriptor
        if class_name in self.classes:
            for method in self.classes[class_name].get_vm_class().get_methods():
                print method.get_name(), method.get_descriptor()
                if method.get_name() == method_name and method.get_descriptor() == method_descriptor:
                    return method
        return None

    def is_class_present(self, class_name):
        """Return True when class_name is defined in the analysed dex."""
        return class_name in self.classes

    def get_class_analysis(self, class_name):
        """Return the ClassAnalysis for class_name, or None."""
        return self.classes.get(class_name)

    def get_strings_analysis(self):
        """Return the dict mapping string value -> StringAnalysis."""
        return self.strings
class VMAnalysis(object):
    """
    This class analyses a dex file

    :param _vm: the object which represent the dex file
    :type _vm: a :class:`DalvikVMFormat` object

    :Example:
          VMAnalysis( DalvikVMFormat( read("toto.dex", binary=False) ) )
    """
    def __init__(self, vm):
        self.vm = vm

        self.tainted_variables = TaintedVariables( self.vm )
        self.tainted_packages = TaintedPackages( self.vm )

        self.tainted = { "variables" : self.tainted_variables,
                         "packages" : self.tainted_packages,
                       }

        # Lazily created Signature helper (see get_method_signature).
        self.signature = None

        # Register every field as a tainted variable.
        for i in self.vm.get_all_fields():
            self.tainted_variables.add( [ i.get_class_name(), i.get_descriptor(), i.get_name() ], TAINTED_FIELD )

        self.methods = []
        self.hmethods = {}
        self.__nmethods = {}
        # Analyse every method up front (uVMAnalysis does this lazily).
        for i in self.vm.get_methods():
            x = MethodAnalysis( self.vm, i, self )
            self.methods.append( x )
            self.hmethods[ i ] = x
            # NOTE(review): keyed by bare method name only -- methods sharing
            # a name across classes overwrite each other here; confirm.
            self.__nmethods[ i.get_name() ] = x

    def get_vm(self):
        """Return the underlying :class:`DalvikVMFormat`."""
        return self.vm

    def get_method(self, method):
        """
        Return an analysis method

        :param method: a classical method object
        :type method: an :class:`EncodedMethod` object

        :rtype: a :class:`MethodAnalysis` object
        """
        return self.hmethods[ method ]

    def get_methods(self):
        """
        Return each analysis method

        :rtype: a :class:`MethodAnalysis` object
        """
        for i in self.hmethods:
            yield self.hmethods[i]

    def get_method_signature(self, method, grammar_type="", options={}, predef_sign=""):
        """
        Return a specific signature for a specific method

        :param method: a reference to method from a vm class
        :type method: a :class:`EncodedMethod` object

        :param grammar_type: the type of the signature (optional)
        :type grammar_type: string
        :param options: the options of the signature (optional)
        :param options: dict

        :param predef_sign: used a predefined signature (optional)
        :type predef_sign: string

        :rtype: a :class:`Sign` object
        """
        # NOTE(review): mutable default `options={}` -- safe only if callees
        # never mutate it; confirm.
        if self.signature == None:
            self.signature = Signature( self )

        if predef_sign != "":
            g = ""
            o = {}

            # A predefined name such as "L0_4:L1" expands each part that
            # contains "_" into the L0 grammar configured in SIGNATURES.
            for i in predef_sign.split(":"):
                if "_" in i:
                    g += "L0:"
                    o[ "L0" ] = SIGNATURES[ i ]
                else:
                    g += i
                    g += ":"

            return self.signature.get_method( self.get_method( method ), g[:-1], o )
        else:
            return self.signature.get_method( self.get_method( method ), grammar_type, options )

    def get_permissions(self, permissions_needed):
        """
        Return the permissions used

        :param permissions_needed: a list of restricted permissions to get ([] returns all permissions)
        :type permissions_needed: list

        :rtype: a dictionnary of permissions paths
        """
        permissions = {}

        # Merge permissions found through API calls and through field access.
        permissions.update( self.get_tainted_packages().get_permissions( permissions_needed ) )
        permissions.update( self.get_tainted_variables().get_permissions( permissions_needed ) )

        return permissions

    def get_permissions_method(self, method):
        """Return the list of permissions required by the given method
        (union of call-based and variable-based permissions)."""
        permissions_f = self.get_tainted_packages().get_permissions_method( method )
        permissions_v = self.get_tainted_variables().get_permissions_method( method )
        all_permissions_of_method = permissions_f.union(permissions_v)

        return list(all_permissions_of_method)

    def get_tainted_variables(self):
        """
        Return the tainted variables

        :rtype: a :class:`TaintedVariables` object
        """
        return self.tainted_variables

    def get_tainted_packages(self):
        """
        Return the tainted packages

        :rtype: a :class:`TaintedPackages` object
        """
        return self.tainted_packages

    def get_tainted_fields(self):
        """Return all tainted fields (shortcut to the variables object)."""
        return self.get_tainted_variables().get_fields()

    def get_tainted_field(self, class_name, name, descriptor):
        """
        Return a specific tainted field

        :param class_name: the name of the class
        :param name: the name of the field
        :param descriptor: the descriptor of the field
        :type class_name: string
        :type name: string
        :type descriptor: string

        :rtype: a :class:`TaintedVariable` object
        """
        return self.get_tainted_variables().get_field( class_name, name, descriptor )
class uVMAnalysis(VMAnalysis):
    """
    This class analyses a dex file but on the fly (quicker !)

    :param _vm: the object which represent the dex file
    :type _vm: a :class:`DalvikVMFormat` object

    :Example:
          uVMAnalysis( DalvikVMFormat( read("toto.dex", binary=False) ) )
    """
    def __init__(self, vm):
        # Unlike VMAnalysis.__init__, no per-method analysis is performed
        # here; methods are analysed lazily via get_methods()/_resolve().
        self.vm = vm
        self.tainted_variables = TaintedVariables( self.vm )
        self.tainted_packages = TaintedPackages( self.vm )

        self.tainted = { "variables" : self.tainted_variables,
                         "packages" : self.tainted_packages,
                       }

        self.signature = None
        # Becomes True once all methods have been analysed at least once.
        self.resolve = False

    def get_methods(self):
        """Yield a fresh MethodAnalysis for each method; analysing them
        populates the tainted_* objects as a side effect."""
        self.resolve = True
        for i in self.vm.get_methods():
            yield MethodAnalysis(self.vm, i, self)

    def get_method(self, method):
        """Return a MethodAnalysis (without tainted info) for method."""
        return MethodAnalysis( self.vm, method, None )

    def get_vm(self):
        """Return the underlying :class:`DalvikVMFormat`."""
        return self.vm

    def _resolve(self):
        # Force one full pass over all methods so that tainted information
        # is complete before it is queried.
        if self.resolve == False:
            for i in self.get_methods():
                pass

    def get_tainted_packages(self):
        """Return the tainted packages (resolving lazily first)."""
        self._resolve()
        return self.tainted_packages

    def get_tainted_variables(self):
        """Return the tainted variables (resolving lazily first)."""
        self._resolve()
        return self.tainted_variables
def is_ascii_obfuscation(vm):
    """Return True when any class or method name in vm trips
    is_ascii_problem, i.e. the dex looks name-obfuscated."""
    for klass in vm.get_classes():
        if is_ascii_problem(klass.get_name()):
            return True
        if any(is_ascii_problem(meth.get_name()) for meth in klass.get_methods()):
            return True
    return False
| apache-2.0 |
marcysweber/hamadryas-social-sim | formatter.py | 1 | 1379 | class Formatter():
def __init__(self, data):
self.data = data
def format(self):
matrix = [[0] * 14 for i in range(len(self.data))]
for i in range(len(self.data)):
row = matrix[i]
row[0] = i
row[1] = self.data[i]["pop_size"]
row[2] = self.data[i]["adult_sex_ratio"]
row[3] = self.data[i]["adult_to_nonadult_ratio"]
row[4] = self.data[i]["within_omu_relat_mean"]
row[5] = self.data[i]["within_omu_relat_var"]
row[6] = self.data[i]["within_dyads"]
row[7] = self.data[i]["across_omu_relat_mean"]
row[8] = self.data[i]["across_omu_relat_var"]
row[9] = self.data[i]["across_dyads"]
row[10] = self.data[i]["initial_units"]
row[11] = self.data[i]["opportunistic_takeovers"]
row[12] = self.data[i]["inheritances"]
row[13] = self.data[i]["challenge_takeovers"]
headers = ["Rep", "Pop_Size", "Ad_Sex_Ratio", "Ad_Juv_Ratio",
"within_omu_relat_mean","within_omu_relat_var", "within_dyads_count",
"across_omu_relat_mean","across_omu_relat_var", "across_dyads_count",
"initial_units", "opportunistic_takeovers", "inheritances", "challenge_takeovers"]
matrix.insert(0, headers)
return matrix
| mit |
ryfeus/lambda-packs | Tensorflow_OpenCV_Nightly/source/tensorflow/python/ops/gen_parsing_ops.py | 3 | 24697 | """Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
from google.protobuf import text_format as _text_format
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
def decode_csv(records, record_defaults, field_delim=None, name=None):
  r"""Convert CSV records to tensors. Each column maps to one tensor.

  RFC 4180 format is expected for the CSV records
  (https://tools.ietf.org/html/rfc4180). Leading and trailing spaces are
  allowed for int and float fields.

  Args:
    records: A `Tensor` of type `string`. Each string is a record/row in the
      csv and all records should have the same format.
    record_defaults: A list of `Tensor` objects with types from: `float32`,
      `int32`, `int64`, `string`. One tensor per column of the input record,
      with either a scalar default value for that column or empty if the
      column is required.
    field_delim: An optional `string`. Defaults to `","`. Delimiter used to
      separate fields in a record.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects with the same types as `record_defaults`;
    each tensor has the same shape as `records`.
  """
  return _op_def_lib.apply_op("DecodeCSV",
                              records=records,
                              record_defaults=record_defaults,
                              field_delim=field_delim,
                              name=name)
def decode_json_example(json_examples, name=None):
  r"""Convert JSON-encoded Example records to binary protocol buffer strings.

  Translates a tensor of Example records encoded using the
  [standard JSON mapping](https://developers.google.com/protocol-buffers/docs/proto3#json)
  into a tensor of the same records encoded as binary protocol buffers,
  suitable for the other Example-parsing ops.

  Args:
    json_examples: A `Tensor` of type `string`. Each string is a JSON object
      serialized according to the JSON mapping of the Example proto.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`. Each string is a binary Example protocol
    buffer corresponding to the respective element of `json_examples`.
  """
  return _op_def_lib.apply_op("DecodeJSONExample",
                              json_examples=json_examples,
                              name=name)
def decode_raw(bytes, out_type, little_endian=None, name=None):
  r"""Reinterpret the bytes of a string as a vector of numbers.

  Args:
    bytes: A `Tensor` of type `string`. All the elements must have the same
      length.
    out_type: A `tf.DType` from: `tf.half, tf.float32, tf.float64, tf.int32,
      tf.uint8, tf.int16, tf.int8, tf.int64`.
    little_endian: An optional `bool`. Defaults to `True`. Whether the input
      `bytes` are in little-endian order; ignored for `out_type` values
      stored in a single byte like `uint8`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type`, with one more dimension than `bytes`;
    the added dimension's size equals the element length of `bytes` divided
    by the number of bytes used to represent `out_type`.
  """
  return _op_def_lib.apply_op("DecodeRaw",
                              bytes=bytes,
                              out_type=out_type,
                              little_endian=little_endian,
                              name=name)
# Output names of the ParseExample op, used to build its result namedtuple.
__parse_example_outputs = ["sparse_indices", "sparse_values", "sparse_shapes",
                           "dense_values"]

_ParseExampleOutput = _collections.namedtuple(
    "ParseExample", __parse_example_outputs)
def _parse_example(serialized, names, sparse_keys, dense_keys, dense_defaults,
                   sparse_types, dense_shapes, name=None):
  r"""Transforms a vector of brain.Example protos (as strings) into typed tensors.

  Args:
    serialized: A `Tensor` of type `string`; a batch of binary serialized
      Example protos.
    names: A `Tensor` of type `string`; (possibly empty) debugging names for
      the serialized protos. If non-empty it must match the length of
      `serialized`; it has no effect on the output.
    sparse_keys: A list of scalar string `Tensor`s; the keys of the sparse
      features expected in the Examples.
    dense_keys: A list of scalar string `Tensor`s; the keys of the dense
      features expected in the Examples.
    dense_defaults: A list of `Tensor` objects with types from: `float32`,
      `int64`, `string`. One per dense key: the default value used when the
      example lacks that feature, or an empty tensor if the feature is
      required. The input type is inferred from dense_defaults[j] even when
      empty. If dense_shapes[j] is fully defined the default's shape must
      match it; if it has an undefined major dimension (variable strides),
      the default must contain a single padding element.
    sparse_types: A list of `tf.DTypes` from: `tf.float32, tf.int64,
      tf.string`; the data types of the sparse features.
    dense_shapes: A list of shapes (each a `tf.TensorShape` or list of
      `ints`), one per dense key. For a fixed shape (D0, ..., DN) the output
      dense_values[j] has shape (|serialized|, D0, ..., DN); for
      (-1, D1, ..., DN) the output is padded to the minibatch maximum along
      the second dimension using the default_value element.
    name: A name for the operation (optional).

  Returns:
    A `ParseExample` namedtuple
    `(sparse_indices, sparse_values, sparse_shapes, dense_values)` where
    sparse_indices/sparse_shapes are int64 tensors (one per sparse key),
    sparse_values have types `sparse_types`, and dense_values have the same
    types as `dense_defaults`.
  """
  op_result = _op_def_lib.apply_op("ParseExample",
                                   serialized=serialized,
                                   names=names,
                                   sparse_keys=sparse_keys,
                                   dense_keys=dense_keys,
                                   dense_defaults=dense_defaults,
                                   sparse_types=sparse_types,
                                   dense_shapes=dense_shapes,
                                   name=name)
  return _ParseExampleOutput._make(op_result)
# Output names of the ParseSingleSequenceExample op, used to build its
# result namedtuple.
__parse_single_sequence_example_outputs = ["context_sparse_indices",
                                           "context_sparse_values",
                                           "context_sparse_shapes",
                                           "context_dense_values",
                                           "feature_list_sparse_indices",
                                           "feature_list_sparse_values",
                                           "feature_list_sparse_shapes",
                                           "feature_list_dense_values"]

_ParseSingleSequenceExampleOutput = _collections.namedtuple(
    "ParseSingleSequenceExample", __parse_single_sequence_example_outputs)
def _parse_single_sequence_example(serialized,
                                   feature_list_dense_missing_assumed_empty,
                                   context_sparse_keys, context_dense_keys,
                                   feature_list_sparse_keys,
                                   feature_list_dense_keys,
                                   context_dense_defaults, debug_name,
                                   context_sparse_types=None,
                                   feature_list_dense_types=None,
                                   context_dense_shapes=None,
                                   feature_list_sparse_types=None,
                                   feature_list_dense_shapes=None, name=None):
  r"""Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors.

  Args:
    serialized: A scalar string `Tensor`; a serialized SequenceExample proto.
    feature_list_dense_missing_assumed_empty: A string vector `Tensor` naming
      FeatureList keys that may be absent from the SequenceExample; a missing
      FeatureList is then treated as empty.  Any FeatureList not listed here
      must be present.
    context_sparse_keys: A list of scalar string `Tensor`s; the keys expected
      in the context features associated with sparse values.
    context_dense_keys: A list of scalar string `Tensor`s; the keys expected
      in the context features associated with dense values.
    feature_list_sparse_keys: A list of scalar string `Tensor`s; the keys
      expected in the FeatureLists associated with sparse values.
    feature_list_dense_keys: A list of scalar string `Tensor`s; the keys
      expected in the FeatureLists associated with lists of dense values.
    context_dense_defaults: A list of `Tensor`s with dtypes from float32,
      int64, string.  context_dense_defaults[j] is used when the context map
      lacks context_dense_keys[j]; an empty Tensor makes that key required.
      The input type is inferred from the default even when it is empty; a
      non-empty default must match context_dense_shapes[j].
    debug_name: A scalar string `Tensor` naming the serialized proto (e.g. a
      table key).  Used only in error messages; may be an empty scalar.
    context_sparse_types: Optional list of `tf.DTypes` from tf.float32,
      tf.int64, tf.string; dtypes of the sparse context values.  Defaults
      to `[]`.
    feature_list_dense_types: Optional list of `tf.DTypes` from tf.float32,
      tf.int64, tf.string.  Defaults to `[]`.
    context_dense_shapes: Optional list of shapes of the dense context
      values; context_dense_values[j] will have shape context_dense_shapes[j].
      Defaults to `[]`.
    feature_list_sparse_types: Optional list of `tf.DTypes` from tf.float32,
      tf.int64, tf.string; dtypes of the sparse FeatureList values.
      Defaults to `[]`.
    feature_list_dense_shapes: Optional list of shapes; each Feature in the
      FeatureList for feature_list_dense_keys[j] must have shape
      feature_list_dense_shapes[j].  Defaults to `[]`.
    name: A name for the operation (optional).

  Returns:
    A `_ParseSingleSequenceExampleOutput` namedtuple with fields
    (context_sparse_indices, context_sparse_values, context_sparse_shapes,
    context_dense_values, feature_list_sparse_indices,
    feature_list_sparse_values, feature_list_sparse_shapes,
    feature_list_dense_values).
  """
  # Gather the op arguments in one mapping so the apply_op call below
  # stays readable; "name" is kept separate because it is not an op input.
  op_inputs = dict(
      serialized=serialized,
      feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty,
      context_sparse_keys=context_sparse_keys,
      context_dense_keys=context_dense_keys,
      feature_list_sparse_keys=feature_list_sparse_keys,
      feature_list_dense_keys=feature_list_dense_keys,
      context_dense_defaults=context_dense_defaults,
      debug_name=debug_name,
      context_sparse_types=context_sparse_types,
      feature_list_dense_types=feature_list_dense_types,
      context_dense_shapes=context_dense_shapes,
      feature_list_sparse_types=feature_list_sparse_types,
      feature_list_dense_shapes=feature_list_dense_shapes,
  )
  flat_outputs = _op_def_lib.apply_op("ParseSingleSequenceExample",
                                      name=name, **op_inputs)
  return _ParseSingleSequenceExampleOutput._make(flat_outputs)
def parse_tensor(serialized, out_type, name=None):
  r"""Transforms a serialized tensorflow.TensorProto proto into a Tensor.

  Args:
    serialized: A scalar `Tensor` of type `string` containing a serialized
      TensorProto proto.
    out_type: A `tf.DType`.  Must match the dtype stored in the serialized
      tensor; no implicit conversion is performed.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type`.
  """
  # Forward straight to the generated op; no post-processing is needed.
  return _op_def_lib.apply_op("ParseTensor", serialized=serialized,
                              out_type=out_type, name=name)
def string_to_number(string_tensor, out_type=None, name=None):
  r"""Converts each string in the input Tensor to the specified numeric type.

  (Note that int32 overflow results in an error while float overflow
  results in a rounded value.)

  Args:
    string_tensor: A `Tensor` of type `string`.
    out_type: An optional `tf.DType` from tf.float32, tf.float64, tf.int32,
      tf.int64; the numeric type each string is interpreted as.
      Defaults to `tf.float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type` with the same shape as `string_tensor`.
  """
  # Forward straight to the generated op; no post-processing is needed.
  return _op_def_lib.apply_op("StringToNumber", string_tensor=string_tensor,
                              out_type=out_type, name=name)
def _InitOpDefLibrary():
  # Parse the ascii OpList attached below as _InitOpDefLibrary.op_list_ascii,
  # register the ops with the global registry, and build the OpDefLibrary
  # through which the wrapper functions above dispatch their apply_op calls.
  op_list = _op_def_pb2.OpList()
  _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)
  _op_def_registry.register_op_list(op_list)
  op_def_lib = _op_def_library.OpDefLibrary()
  op_def_lib.add_op_list(op_list)
  return op_def_lib
_InitOpDefLibrary.op_list_ascii = """op {
name: "DecodeCSV"
input_arg {
name: "records"
type: DT_STRING
}
input_arg {
name: "record_defaults"
type_list_attr: "OUT_TYPE"
}
output_arg {
name: "output"
type_list_attr: "OUT_TYPE"
}
attr {
name: "OUT_TYPE"
type: "list(type)"
has_minimum: true
minimum: 1
allowed_values {
list {
type: DT_FLOAT
type: DT_INT32
type: DT_INT64
type: DT_STRING
}
}
}
attr {
name: "field_delim"
type: "string"
default_value {
s: ","
}
}
}
op {
name: "DecodeJSONExample"
input_arg {
name: "json_examples"
type: DT_STRING
}
output_arg {
name: "binary_examples"
type: DT_STRING
}
}
op {
name: "DecodeRaw"
input_arg {
name: "bytes"
type: DT_STRING
}
output_arg {
name: "output"
type_attr: "out_type"
}
attr {
name: "out_type"
type: "type"
allowed_values {
list {
type: DT_HALF
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT32
type: DT_UINT8
type: DT_INT16
type: DT_INT8
type: DT_INT64
}
}
}
attr {
name: "little_endian"
type: "bool"
default_value {
b: true
}
}
}
op {
name: "ParseExample"
input_arg {
name: "serialized"
type: DT_STRING
}
input_arg {
name: "names"
type: DT_STRING
}
input_arg {
name: "sparse_keys"
type: DT_STRING
number_attr: "Nsparse"
}
input_arg {
name: "dense_keys"
type: DT_STRING
number_attr: "Ndense"
}
input_arg {
name: "dense_defaults"
type_list_attr: "Tdense"
}
output_arg {
name: "sparse_indices"
type: DT_INT64
number_attr: "Nsparse"
}
output_arg {
name: "sparse_values"
type_list_attr: "sparse_types"
}
output_arg {
name: "sparse_shapes"
type: DT_INT64
number_attr: "Nsparse"
}
output_arg {
name: "dense_values"
type_list_attr: "Tdense"
}
attr {
name: "Nsparse"
type: "int"
has_minimum: true
}
attr {
name: "Ndense"
type: "int"
has_minimum: true
}
attr {
name: "sparse_types"
type: "list(type)"
has_minimum: true
allowed_values {
list {
type: DT_FLOAT
type: DT_INT64
type: DT_STRING
}
}
}
attr {
name: "Tdense"
type: "list(type)"
has_minimum: true
allowed_values {
list {
type: DT_FLOAT
type: DT_INT64
type: DT_STRING
}
}
}
attr {
name: "dense_shapes"
type: "list(shape)"
has_minimum: true
}
}
op {
name: "ParseSingleSequenceExample"
input_arg {
name: "serialized"
type: DT_STRING
}
input_arg {
name: "feature_list_dense_missing_assumed_empty"
type: DT_STRING
}
input_arg {
name: "context_sparse_keys"
type: DT_STRING
number_attr: "Ncontext_sparse"
}
input_arg {
name: "context_dense_keys"
type: DT_STRING
number_attr: "Ncontext_dense"
}
input_arg {
name: "feature_list_sparse_keys"
type: DT_STRING
number_attr: "Nfeature_list_sparse"
}
input_arg {
name: "feature_list_dense_keys"
type: DT_STRING
number_attr: "Nfeature_list_dense"
}
input_arg {
name: "context_dense_defaults"
type_list_attr: "Tcontext_dense"
}
input_arg {
name: "debug_name"
type: DT_STRING
}
output_arg {
name: "context_sparse_indices"
type: DT_INT64
number_attr: "Ncontext_sparse"
}
output_arg {
name: "context_sparse_values"
type_list_attr: "context_sparse_types"
}
output_arg {
name: "context_sparse_shapes"
type: DT_INT64
number_attr: "Ncontext_sparse"
}
output_arg {
name: "context_dense_values"
type_list_attr: "Tcontext_dense"
}
output_arg {
name: "feature_list_sparse_indices"
type: DT_INT64
number_attr: "Nfeature_list_sparse"
}
output_arg {
name: "feature_list_sparse_values"
type_list_attr: "feature_list_sparse_types"
}
output_arg {
name: "feature_list_sparse_shapes"
type: DT_INT64
number_attr: "Nfeature_list_sparse"
}
output_arg {
name: "feature_list_dense_values"
type_list_attr: "feature_list_dense_types"
}
attr {
name: "Ncontext_sparse"
type: "int"
default_value {
i: 0
}
has_minimum: true
}
attr {
name: "Ncontext_dense"
type: "int"
default_value {
i: 0
}
has_minimum: true
}
attr {
name: "Nfeature_list_sparse"
type: "int"
default_value {
i: 0
}
has_minimum: true
}
attr {
name: "Nfeature_list_dense"
type: "int"
default_value {
i: 0
}
has_minimum: true
}
attr {
name: "context_sparse_types"
type: "list(type)"
default_value {
list {
}
}
has_minimum: true
allowed_values {
list {
type: DT_FLOAT
type: DT_INT64
type: DT_STRING
}
}
}
attr {
name: "Tcontext_dense"
type: "list(type)"
default_value {
list {
}
}
has_minimum: true
allowed_values {
list {
type: DT_FLOAT
type: DT_INT64
type: DT_STRING
}
}
}
attr {
name: "feature_list_dense_types"
type: "list(type)"
default_value {
list {
}
}
has_minimum: true
allowed_values {
list {
type: DT_FLOAT
type: DT_INT64
type: DT_STRING
}
}
}
attr {
name: "context_dense_shapes"
type: "list(shape)"
default_value {
list {
}
}
has_minimum: true
}
attr {
name: "feature_list_sparse_types"
type: "list(type)"
default_value {
list {
}
}
has_minimum: true
allowed_values {
list {
type: DT_FLOAT
type: DT_INT64
type: DT_STRING
}
}
}
attr {
name: "feature_list_dense_shapes"
type: "list(shape)"
default_value {
list {
}
}
has_minimum: true
}
}
op {
name: "ParseTensor"
input_arg {
name: "serialized"
type: DT_STRING
}
output_arg {
name: "output"
type_attr: "out_type"
}
attr {
name: "out_type"
type: "type"
}
}
op {
name: "StringToNumber"
input_arg {
name: "string_tensor"
type: DT_STRING
}
output_arg {
name: "output"
type_attr: "out_type"
}
attr {
name: "out_type"
type: "type"
default_value {
type: DT_FLOAT
}
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT32
type: DT_INT64
}
}
}
}
"""
_op_def_lib = _InitOpDefLibrary()
| mit |
pandeyadarsh/sympy | sympy/printing/lambdarepr.py | 23 | 7102 | from __future__ import print_function, division
from .str import StrPrinter
from sympy.utilities import default_sort_key
class LambdaPrinter(StrPrinter):
    """
    This printer converts expressions into strings that can be used by
    lambdify.
    """
    def _print_MatrixBase(self, expr):
        # Print a matrix as "ClassName([[...], ...])" so the generated
        # source round-trips through the matrix class constructor.
        return "%s(%s)" % (expr.__class__.__name__,
                           self._print((expr.tolist())))
    # All concrete matrix classes share the MatrixBase printer above.
    _print_SparseMatrix = \
        _print_MutableSparseMatrix = \
        _print_ImmutableSparseMatrix = \
        _print_Matrix = \
        _print_DenseMatrix = \
        _print_MutableDenseMatrix = \
        _print_ImmutableMatrix = \
        _print_ImmutableDenseMatrix = \
        _print_MatrixBase
    def _print_Piecewise(self, expr):
        # Emit nested conditional expressions:
        #   ((e1) if (c1) else ((e2) if (c2) else (... else None)))
        # Each piece opens "((expr) if (cond) else (" and the closers are
        # balanced after the loop.
        result = []
        i = 0
        for arg in expr.args:
            e = arg.expr
            c = arg.cond
            result.append('((')
            result.append(self._print(e))
            result.append(') if (')
            result.append(self._print(c))
            result.append(') else (')
            i += 1
        result = result[:-1]  # drop the last piece's trailing ') else ('
        result.append(') else None)')  # no condition matched -> None
        result.append(')'*(2*i - 2))  # close parens opened by earlier pieces
        return ''.join(result)
    def _print_And(self, expr):
        # Sort the args for deterministic output, then join with Python's
        # "and"; the trailing separator is sliced off before closing.
        result = ['(']
        for arg in sorted(expr.args, key=default_sort_key):
            result.extend(['(', self._print(arg), ')'])
            result.append(' and ')
        result = result[:-1]
        result.append(')')
        return ''.join(result)
    def _print_Or(self, expr):
        # Same scheme as _print_And, with Python's "or" keyword.
        result = ['(']
        for arg in sorted(expr.args, key=default_sort_key):
            result.extend(['(', self._print(arg), ')'])
            result.append(' or ')
        result = result[:-1]
        result.append(')')
        return ''.join(result)
    def _print_Not(self, expr):
        # Not is unary: negate its single argument.
        result = ['(', 'not (', self._print(expr.args[0]), '))']
        return ''.join(result)
    def _print_BooleanTrue(self, expr):
        return "True"
    def _print_BooleanFalse(self, expr):
        return "False"
class NumPyPrinter(LambdaPrinter):
    """
    Numpy printer which handles vectorized piecewise functions,
    logical operators, etc.
    """
    _default_settings = {
        "order": "none",
        "full_prec": "auto",
    }

    def _print_seq(self, seq, delimiter=', '):
        """General sequence printer: converts to tuple.

        Tuples are printed instead of lists because numba supports tuples
        in nopython mode.
        """
        printed_items = [self._print(item) for item in seq]
        return "(" + delimiter.join(printed_items) + ",)"

    def _print_MatMul(self, expr):
        """Matrix multiplication printer: chains .dot() calls."""
        factors = [self._print(factor) for factor in expr.args]
        return "(" + ").dot(".join(factors) + ")"

    def _print_Piecewise(self, expr):
        """Piecewise function printer, mapped onto numpy's select()."""
        branch_exprs = ",".join(self._print(arg.expr) for arg in expr.args)
        branch_conds = ",".join(self._print(arg.cond) for arg in expr.args)
        # A trailing (default_value, True) pair in a Piecewise behaves like
        # select()'s 'default' keyword, but *only* when it is the last
        # element of expr.args; otherwise it may be triggered prematurely.
        return "select([%s], [%s], default=nan)" % (branch_conds, branch_exprs)

    def _print_And(self, expr):
        """Logical And printer.

        Overrides LambdaPrinter, which emits Python's 'and' keyword;
        numpy needs the vectorized logical_and instead.
        """
        joined_args = ",".join(self._print(arg) for arg in expr.args)
        return "logical_and(%s)" % joined_args

    def _print_Or(self, expr):
        """Logical Or printer (vectorized logical_or, not Python 'or')."""
        joined_args = ",".join(self._print(arg) for arg in expr.args)
        return "logical_or(%s)" % joined_args

    def _print_Not(self, expr):
        """Logical Not printer (vectorized logical_not, not Python 'not')."""
        joined_args = ",".join(self._print(arg) for arg in expr.args)
        return "logical_not(%s)" % joined_args
# numexpr works by altering the string passed to numexpr.evaluate
# rather than by populating a namespace. Thus a special printer...
class NumExprPrinter(LambdaPrinter):
    """Printer producing source accepted by numexpr.evaluate()."""

    # Maps sympy function names to their numexpr spellings; calling a
    # function that is not listed here raises a TypeError.
    _numexpr_functions = {
        'sin' : 'sin',
        'cos' : 'cos',
        'tan' : 'tan',
        'asin': 'arcsin',
        'acos': 'arccos',
        'atan': 'arctan',
        'atan2' : 'arctan2',
        'sinh' : 'sinh',
        'cosh' : 'cosh',
        'tanh' : 'tanh',
        'asinh': 'arcsinh',
        'acosh': 'arccosh',
        'atanh': 'arctanh',
        'ln' : 'log',
        'log': 'log',
        'exp': 'exp',
        'sqrt' : 'sqrt',
        'Abs' : 'abs',
        'conjugate' : 'conj',
        'im' : 'imag',
        're' : 'real',
        'where' : 'where',
        'complex' : 'complex',
        'contains' : 'contains',
    }

    def _print_ImaginaryUnit(self, expr):
        return '1j'

    def _print_seq(self, seq, delimiter=', '):
        # Simplified sequence printer (adapted from pretty.py): plain
        # delimiter join, empty string for an empty sequence.
        printed = [self._print(item) for item in seq]
        return delimiter.join(printed) if printed else ""

    def _print_Function(self, e):
        func_name = e.func.__name__
        try:
            translated = self._numexpr_functions[func_name]
        except KeyError:
            # implemented_function instances carry a Python-level
            # implementation that can be expanded inline.
            if hasattr(e, '_imp_'):
                return "(%s)" % self._print(e._imp_(*e.args))
            raise TypeError("numexpr does not support function '%s'" %
                            func_name)
        return "%s(%s)" % (translated, self._print_seq(e.args))

    def blacklisted(self, expr):
        # Shared handler for expression types numexpr cannot evaluate.
        raise TypeError("numexpr cannot be used with %s" %
                        expr.__class__.__name__)

    # blacklist all Matrix printing
    _print_SparseMatrix = \
        _print_MutableSparseMatrix = \
        _print_ImmutableSparseMatrix = \
        _print_Matrix = \
        _print_DenseMatrix = \
        _print_MutableDenseMatrix = \
        _print_ImmutableMatrix = \
        _print_ImmutableDenseMatrix = \
        blacklisted

    # blacklist some python expressions
    _print_list = \
        _print_tuple = \
        _print_Tuple = \
        _print_dict = \
        _print_Dict = \
        blacklisted

    def doprint(self, expr):
        # numexpr receives the whole expression as one string argument to
        # evaluate(), so wrap the plain printed form accordingly.
        inner = super(NumExprPrinter, self).doprint(expr)
        return "evaluate('%s')" % inner
def lambdarepr(expr, **settings):
    """
    Returns a string usable for lambdifying.
    """
    printer = LambdaPrinter(settings)
    return printer.doprint(expr)
| bsd-3-clause |
peheje/baselines | toy_examples/windy_gridworld/gridworld.py | 1 | 2524 | import tabulate
from namedlist import namedlist
# Position = namedlist("Position", ["row", "col"])
class Position:
    """A mutable (row, col) coordinate on the grid.

    Two positions compare equal iff both row and col match.
    """
    def __init__(self, row, col):
        self.row = row
        self.col = col

    def __eq__(self, other):
        # Compare field-wise; previously comparing against an object
        # without row/col attributes raised AttributeError.  Returning
        # NotImplemented lets Python fall back to the default (False)
        # while still supporting duck-typed position-like objects.
        try:
            return self.row == other.row and self.col == other.col
        except AttributeError:
            return NotImplemented

    def __repr__(self):
        # Debug-friendly representation.
        return "Position(row=%r, col=%r)" % (self.row, self.col)
class Gridworld:
    # Windy-gridworld environment: the agent ("S") moves on a rows x cols
    # board toward a goal ("G"); visited cells are marked "x" and a wall
    # column ("W") blocks movement.
    def __init__(self, rows, cols):
        # Goal sits 10 rows above the bottom edge, in the last column.
        # NOTE(review): assumes rows > 10 -- confirm with callers.
        self.goal = Position(rows - 10, cols - 1)
        self.rows = rows
        self.cols = cols
        self.world = None      # 2D list of single-char cell markers
        self.pos = None        # agent's current Position
        self.col_wind = None   # per-column upward wind strength
        self.reset()
    def reset(self):
        # Rebuild an empty board, place the agent at the origin, and
        # redraw the goal, wind table and wall.
        self.world = [[" " for _ in range(self.cols)] for _ in range(self.rows)]
        self.pos = Position(0, 0)
        self.world[self.goal.row][self.goal.col] = "G"
        self.world[self.pos.row][self.pos.col] = "S"
        # self.col_wind = [0, 0, 0, 1, 1, 1, 2, 2, 1, 0]
        self.__create_wind__()
        self.__wall__()
    def __create_wind__(self):
        # Build the per-column wind table by board quartile.
        # NOTE(review): every branch currently appends 0, so wind is
        # effectively disabled; presumably the per-quartile strengths are
        # placeholders to be tuned -- confirm before relying on wind.
        self.col_wind = []
        q1 = self.cols // 4
        q2 = self.cols // 2
        q3 = (self.cols // 4) * 3
        for i in range(self.cols):
            if 0 < i <= q1:
                self.col_wind.append(0)
            elif q1 < i <= q2:
                self.col_wind.append(0)
            elif q2 < i <= q3:
                self.col_wind.append(0)
            else:
                self.col_wind.append(0)
    def __wall__(self):
        # Draw a vertical wall down the middle column, leaving the bottom
        # row open as a passage.  q1/q3 are computed but unused here.
        q1 = self.cols // 4
        q2 = self.cols // 2
        q3 = (self.cols // 4) * 3
        for i in range(0, self.rows-1):
            self.world[i][q2] = "W"
    def wind(self):
        # Push the agent upward once per unit of wind in its column.
        # NOTE(review): the reward returned by move() is discarded here,
        # so being blown onto the goal yields no reward -- confirm intended.
        for i in range(self.col_wind[self.pos.col]):
            self.world[self.pos.row][self.pos.col] = "x"
            self.move(Position(-1, 0))
        self.world[self.pos.row][self.pos.col] = "S"
        self.world[self.goal.row][self.goal.col] = "G"
    def move(self, move):
        """ Returns reward = 1 if we moved into goal """
        # Mark the departed cell, then step only if the target is on the
        # board and not a wall cell; otherwise the agent stays put.
        self.world[self.pos.row][self.pos.col] = "x"
        new_row = self.pos.row + move.row
        new_col = self.pos.col + move.col
        if -1 < new_row < self.rows and -1 < new_col < self.cols:
            if self.world[new_row][new_col] != "W":
                self.pos.row = new_row
                self.pos.col = new_col
        self.world[self.pos.row][self.pos.col] = "S"
        self.world[self.goal.row][self.goal.col] = "G"
        return 1 if self.pos == self.goal else 0
    def __str__(self):
        # NOTE(review): printing col_wind from __str__ is a side effect;
        # looks like a debug leftover -- consider removing.
        print(self.col_wind)
        return tabulate.tabulate(self.world, tablefmt="fancy_grid")
| mit |
lcpt/xc | verif/tests/preprocessor/cad/test_rotacion_01.py | 1 | 1072 | # -*- coding: utf-8 -*-
import xc_base
import geom
import xc
import math
import os
from model import model_inquiry as mi
__author__= "Luis C. Pérez Tato (LCPT)"
__copyright__= "Copyright 2014, LCPT"
__license__= "GPL"
__version__= "3.0"
__email__= "l.pereztato@gmail.com"
# Regression test for the "rotation" geometric transformation: rotating
# the point (0,1,0) by -pi/2 about the Z axis must land on (1,0,0),
# i.e. coincide with point 3.
feProblem= xc.FEProblem()
preprocessor= feProblem.getPreprocessor
# Defining nodes
points= preprocessor.getMultiBlockTopology.getPoints
pt1= points.newPntIDPos3d(1,geom.Pos3d(0,0,0))
pt2= points.newPntIDPos3d(2,geom.Pos3d(0,0,1))
pt3= points.newPntIDPos3d(3,geom.Pos3d(1,0,0))
# Rotation axis: the line through pt1 and pt2 (the global Z axis).
axis= geom.Line3d(pt1.getPos,pt2.getPos)
ang= math.pi/2.0
trfs= preprocessor.getMultiBlockTopology.getGeometricTransformations
rot1= trfs.newTransformation("rotation")
rot1.setAxisAng(axis,-ang)
# Point 4 is the image of (0,1,0) under the rotation; it should equal pt3.
pt4= points.newPntIDPos3d(4,rot1.getTrfPos(geom.Pos3d(0.0, 1.0, 0.0)))
dist34= mi.distance_2points(preprocessor, 3,4)
#print "dist(3,4)= ",dist34
import os
from miscUtils import LogMessages as lmsg
fname= os.path.basename(__file__)
# The test passes when points 3 and 4 coincide (distance ~ 0).
if (dist34<1e-12):
  print "test ",fname,": ok."
else:
  lmsg.error(fname+' ERROR.')
| gpl-3.0 |
michaelhowden/eden | controllers/errors.py | 34 | 3565 | # -*- coding: utf-8 -*-
"""
HTTP Error handler -- implements nicer error pages
You need to add/replace the following to your routes.py in web2py directory
routes_onerror = [
('eden/400', '!'),
('eden/401', '!'),
('eden/509', '!'),
("eden/*", "/eden/errors/index"),
("*/*", "/eden/errors/index"),
]
NOTE: if Eden is installed elsewhere or exists under different name in applications folder,
just rename it in above list. Comment the last route to disable error
catching for other apps in the same web2py environment
"""
from gluon.http import defined_status
#s3.stylesheets.append("S3/errorpages.css")
error_messages = {
"NA":(T("Oops! Something went wrong..."), []),
400:(T("Sorry, I could not understand your request"),
[T("Check for errors in the URL, maybe the address was mistyped.")]),
403:(T("Sorry, that page is forbidden for some reason."),
[T("Check if the URL is pointing to a directory instead of a webpage."),
T("Check for errors in the URL, maybe the address was mistyped.")]),
404:(T("Sorry, we couldn't find that page."),
[T("Try checking the URL for errors, maybe it was mistyped."),
T("Try refreshing the page or hitting the back button on your browser.")]),
500:(T("Oops! something went wrong on our side."),
[T("Try hitting refresh/reload button or trying the URL from the address bar again."),
T("Please come back after sometime if that doesn\'t help.")]),
502:(T("Sorry, something went wrong."),
[T("The server received an incorrect response from another server that it was accessing to fill the request by the browser."),
T("Hit the back button on your browser to try again."),
T("Come back later.")]),
503:(T("Sorry, that service is temporary unavailable."),
[T("This might be due to a temporary overloading or maintenance of the server."),
T("Hit the back button on your browser to try again."),
T("Come back later.")]),
504:(T("Sorry, things didn't get done on time."),
[T("The server did not receive a timely response from another server that it was accessing to fill the request by the browser."),
T("Hit the back button on your browser to try again."),
T("Come back later. Everyone visiting this site is probably experiencing the same problem as you.")]),
}
def index():
    """Generic HTTP error page.

    Reads the failing status code and original URL from ``request.vars``
    (as routed by ``routes_onerror``), then either raises a JSON error
    response (for non-interactive request formats) or renders a friendly
    error page with suggestions.
    """
    try:
        code = int(request.vars["code"])
        description = defined_status[code]
    except (KeyError, TypeError, ValueError):
        # KeyError: code missing or not a known HTTP status.
        # TypeError/ValueError: the "code" var was absent (None) or not an
        # integer string.  Previously only KeyError was caught, so a
        # malformed "code" crashed the error handler itself.
        description = "unknown error"
        code = "NA"
    # Send a JSON message if non-interactive request
    request_url = request.vars["request_url"]
    path = request_url.split("/")
    ext = [a for a in path if "." in a]
    if ext:
        fmt = ext[-1].rsplit(".", 1)[1].lower()
        if fmt not in ("html", "iframe", "popup"):
            xml = current.xml
            headers = {"Content-Type": "application/json"}
            # Fall back to 500 when the reported code was unusable;
            # the old code re-read request.vars["code"] and int()'d it,
            # which could raise again.
            status = code if isinstance(code, int) else 500
            raise HTTP(status,
                       body=xml.json_message(False, code, description),
                       **headers)
    details = " %s, %s " % (code, description)
    try:
        message, suggestions = error_messages[code]
    except KeyError:
        message, suggestions = error_messages["NA"]
    # Retain the HTTP status code on error pages -- only when it is a real
    # integer; int("NA") used to raise here for unknown codes.
    if isinstance(code, int):
        response.status = code
    return dict(res=request.vars, message=message, details=details,
                suggestions=suggestions, app=appname)
| mit |
aabbox/kbengine | kbe/res/scripts/common/Lib/sre_constants.py | 106 | 7267 | #
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017
from _sre import MAXREPEAT
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
    """Exception raised for invalid regular expressions."""
    pass
# operators
FAILURE = "failure"
SUCCESS = "success"
ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"
# positions
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
# categories
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
OPCODES = [
# failure=0 success=1 (just because it looks better that way :-)
FAILURE, SUCCESS,
ANY, ANY_ALL,
ASSERT, ASSERT_NOT,
AT,
BRANCH,
CALL,
CATEGORY,
CHARSET, BIGCHARSET,
GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
IN, IN_IGNORE,
INFO,
JUMP,
LITERAL, LITERAL_IGNORE,
MARK,
MAX_UNTIL,
MIN_UNTIL,
NOT_LITERAL, NOT_LITERAL_IGNORE,
NEGATE,
RANGE,
REPEAT,
REPEAT_ONE,
SUBPATTERN,
MIN_REPEAT_ONE
]
ATCODES = [
AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
AT_UNI_NON_BOUNDARY
]
CHCODES = [
CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
    """Map each item of *list* to its index (i.e. its numeric code).

    The parameter name shadows the ``list`` builtin; it is kept unchanged
    for backward compatibility with existing callers.
    """
    # Dict comprehension with enumerate replaces the manual counter loop.
    return {item: i for i, item in enumerate(list)}
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
    def dump(f, d, prefix):
        # Emit one "#define PREFIX_NAME value" line per entry, sorted by
        # the numeric code so the generated header is stable across runs.
        items = sorted(d.items(), key=lambda a: a[1])
        for k, v in items:
            f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))
    # Use a context manager so the header file is closed (and flushed)
    # even if one of the writes fails; the old code only called close()
    # on the success path.
    with open("sre_constants.h", "w") as f:
        f.write("""\
/*
 * Secret Labs' Regular Expression Engine
 *
 * regular expression matching engine
 *
 * NOTE: This file is generated by sre_constants.py. If you need
 * to change anything in here, edit sre_constants.py and run it.
 *
 * Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
 *
 * See the _sre.c file for information on usage and redistribution.
 */
""")
        f.write("#define SRE_MAGIC %d\n" % MAGIC)
        dump(f, OPCODES, "SRE_OP")
        dump(f, ATCODES, "SRE")
        dump(f, CHCODES, "SRE")
        f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
        f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
        f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
        f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
        f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
        f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
        f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
        f.write("#define SRE_FLAG_DEBUG %d\n" % SRE_FLAG_DEBUG)
        f.write("#define SRE_FLAG_ASCII %d\n" % SRE_FLAG_ASCII)
        f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
        f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
        f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
    print("done")
| lgpl-3.0 |
sparkslabs/kamaelia_ | Sketches/PT/likefile/axon_only_example.py | 3 | 1486 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time, sys, Axon
from Axon.likefile import LikeFile, schedulerThread
schedulerThread().start()
class Reverser(Axon.Component.component):
    """Axon component that reverses every string received on 'inbox'.

    Reversed items are emitted on 'outbox'. The component pauses itself
    whenever no input is ready, yielding control back to the scheduler.
    """

    def main(self):
        # Generator-style main loop: the Axon scheduler repeatedly resumes
        # this generator to give the component CPU time.
        while True:
            if self.dataReady('inbox'):
                item = self.recv('inbox')
                self.send(item[::-1], 'outbox') # strings have no "reverse" method, hence this indexing 'hack'.
            else: self.pause()
            # Hand control back to the scheduler after each step.
            yield 1
# Unix's "rev" tool, implemented using likefile.
# LikeFile wraps the component so it can be driven from ordinary
# (non-Axon) code: put() feeds 'inbox', get() blocks for 'outbox'.
reverser = LikeFile(Reverser())
while True:
    line = sys.stdin.readline().rstrip() # get rid of the newline
    reverser.put(line)
    enil = reverser.get()
    # NOTE: Python 2 print statement; this example predates Python 3.
    print enil
melon-li/openstack-dashboard | openstack_dashboard/dashboards/admin/info/tests.py | 25 | 4011 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IgnoreArg # noqa
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:admin:info:index')
class SystemInfoViewTests(test.BaseAdminViewTests):
    """Tests for the admin "System Information" panel.

    Each test records the expected API calls with mox stubs
    (record/replay/verify), renders the index view once via
    ``_test_base_index`` and then inspects a single tab of the
    resulting tab group.
    """

    @test.create_stubs({api.base: ('is_service_enabled',),
                        api.nova: ('service_list',),
                        api.neutron: ('agent_list', 'is_extension_supported'),
                        api.cinder: ('service_list',),
                        api.heat: ('service_list',)})
    def _test_base_index(self):
        # Report every service type as enabled so that all tabs render.
        api.base.is_service_enabled(IsA(http.HttpRequest), IgnoreArg()) \
            .MultipleTimes().AndReturn(True)
        services = self.services.list()
        api.nova.service_list(IsA(http.HttpRequest)).AndReturn(services)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'agent').AndReturn(True)
        agents = self.agents.list()
        api.neutron.agent_list(IsA(http.HttpRequest)).AndReturn(agents)

        cinder_services = self.cinder_services.list()
        api.cinder.service_list(IsA(http.HttpRequest)).\
            AndReturn(cinder_services)

        heat_services = self.heat_services.list()
        api.heat.service_list(IsA(http.HttpRequest)).\
            AndReturn(heat_services)

        # Switch mox from record mode to replay mode before rendering.
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'admin/info/index.html')
        return res

    def test_index(self):
        res = self._test_base_index()
        services_tab = res.context['tab_group'].get_tab('services')
        # Expected reprs come from the service fixtures in the test data.
        self.assertQuerysetEqual(
            services_tab._tables['services'].data,
            ['<Service: compute>',
             '<Service: volume>',
             '<Service: volumev2>',
             '<Service: image>',
             '<Service: identity (native backend)>',
             '<Service: object-store>',
             '<Service: network>',
             '<Service: ec2>',
             '<Service: metering>',
             '<Service: orchestration>',
             '<Service: database>',
             '<Service: data-processing>', ])
        self.mox.VerifyAll()

    def test_neutron_index(self):
        res = self._test_base_index()
        network_agents_tab = res.context['tab_group'].get_tab('network_agents')
        self.assertQuerysetEqual(
            network_agents_tab._tables['network_agents'].data,
            [agent.__repr__() for agent in self.agents.list()]
        )
        self.mox.VerifyAll()

    def test_cinder_index(self):
        res = self._test_base_index()
        cinder_services_tab = res.context['tab_group'].\
            get_tab('cinder_services')
        self.assertQuerysetEqual(
            cinder_services_tab._tables['cinder_services'].data,
            [service.__repr__() for service in self.cinder_services.list()]
        )
        self.mox.VerifyAll()

    def test_heat_index(self):
        res = self._test_base_index()
        heat_services_tab = res.context['tab_group'].\
            get_tab('heat_services')
        self.assertQuerysetEqual(
            heat_services_tab._tables['heat_services'].data,
            [service.__repr__() for service in self.heat_services.list()]
        )
        self.mox.VerifyAll()
| apache-2.0 |
henryr/Impala | tests/failure/test_failpoints.py | 3 | 5652 | #!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
# Injects failures at specific locations in each of the plan nodes. Currently supports
# two types of failures - cancellation of the query and a failure test hook.
#
import os
import pytest
import re
from copy import copy
from collections import defaultdict
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.impala_test_suite import ImpalaTestSuite, ALL_NODES_ONLY, LOG
from tests.common.test_vector import TestDimension
from tests.common.test_dimensions import create_exec_option_dimension
from tests.common.skip import *
from tests.util.test_file_parser import QueryTestSectionReader
from time import sleep
FAILPOINT_ACTION = ['FAIL', 'CANCEL']
FAILPOINT_LOCATION = ['PREPARE', 'OPEN', 'GETNEXT', 'CLOSE']
# The goal of this query is to use all of the node types.
# TODO: This query could be simplified a bit...
QUERY = """
select a.int_col, count(b.int_col) int_sum from functional_hbase.alltypesagg a
join
(select * from alltypes
where year=2009 and month=1 order by int_col limit 2500
union all
select * from alltypes
where year=2009 and month=2 limit 3000) b
on (a.int_col = b.int_col)
group by a.int_col
order by int_sum
"""
# TODO: Update to include INSERT when we support failpoints in the HDFS/Hbase sinks using
# a similar pattern as test_cancellation.py
QUERY_TYPE = ["SELECT"]
@skip_if_s3_hbase # S3: missing coverage: failures
class TestFailpoints(ImpalaTestSuite):
  """Runs a query while injecting either a failure (FAIL debug action) or a
  cancellation (a WAIT debug action plus a client-side cancel) at a specific
  location (PREPARE/OPEN/GETNEXT/CLOSE) in each plan node, and verifies the
  query either fails cleanly or can be cancelled successfully."""

  @classmethod
  def get_workload(cls):
    return 'functional-query'

  @classmethod
  def parse_plan_nodes_from_explain_output(cls, query, use_db="default"):
    """Parses the EXPLAIN <query> output and returns a map of node_name->list(node_id)"""
    client = cls.create_impala_client()
    client.execute("use %s" % use_db)
    # Fixed: previously explained the module-level QUERY instead of the
    # 'query' argument (identical results for existing callers, which
    # all pass QUERY).
    explain_result = client.execute("explain " + query)
    # Maps plan node names to their respective node ids. Expects format of <ID>:<NAME>
    node_id_map = defaultdict(list)
    for row in explain_result.data:
      match = re.search(r'\s*(?P<node_id>\d+)\:(?P<node_type>\S+\s*\S+)', row)
      if match is not None:
        node_id_map[match.group('node_type')].append(int(match.group('node_id')))
    return node_id_map

  @classmethod
  def add_test_dimensions(cls):
    super(TestFailpoints, cls).add_test_dimensions()
    # Executing an explain on the test query will fail in an environment where hbase
    # tables don't exist (s3). Since this happens before the tests are run, the skipif
    # marker won't catch it. If 's3' is detected as a file system, return immediately.
    if os.getenv("TARGET_FILESYSTEM") == "s3": return
    node_id_map = TestFailpoints.parse_plan_nodes_from_explain_output(QUERY, "functional")
    assert node_id_map
    cls.TestMatrix.add_dimension(TestDimension('location', *FAILPOINT_LOCATION))
    cls.TestMatrix.add_dimension(TestDimension('target_node', *(node_id_map.items())))
    cls.TestMatrix.add_dimension(TestDimension('action', *FAILPOINT_ACTION))
    cls.TestMatrix.add_dimension(TestDimension('query_type', *QUERY_TYPE))
    cls.TestMatrix.add_dimension(create_exec_option_dimension([0], [False], [0]))

    # These are invalid test cases.
    # For more info see IMPALA-55 and IMPALA-56.
    cls.TestMatrix.add_constraint(lambda v: not (\
        v.get_value('action') == 'FAIL' and\
        v.get_value('location') in ['CLOSE'] and\
        v.get_value('target_node')[0] in ['AGGREGATE', 'HASH JOIN']) and\
        not (v.get_value('location') in ['PREPARE'] and \
             v.get_value('action') == 'CANCEL'))

    # Don't create CLOSE:WAIT debug actions to avoid leaking plan fragments (there's no
    # way to cancel a plan fragment once Close() has been called)
    cls.TestMatrix.add_constraint(
        lambda v: not (v.get_value('action') == 'CANCEL'
                       and v.get_value('location') == 'CLOSE'))

  def test_failpoints(self, vector):
    query = QUERY
    node_type, node_ids = vector.get_value('target_node')
    action = vector.get_value('action')
    location = vector.get_value('location')

    for node_id in node_ids:
      # CANCEL is implemented by making the node WAIT and then cancelling
      # from the client; FAIL makes the node return an error directly.
      debug_action = '%d:%s:%s' % (node_id, location,
                                   'WAIT' if action == 'CANCEL' else 'FAIL')
      # Fixed typo in the log message: 'dubug' -> 'debug'.
      LOG.info('Current debug action: SET DEBUG_ACTION=%s' % debug_action)
      vector.get_value('exec_option')['debug_action'] = debug_action

      if action == 'CANCEL':
        self.__execute_cancel_action(query, vector)
      elif action == 'FAIL':
        self.__execute_fail_action(query, vector)
      else:
        assert 0, 'Unknown action: %s' % action

    # We should be able to execute the same query successfully when no failures are
    # injected.
    del vector.get_value('exec_option')['debug_action']
    self.execute_query(query, vector.get_value('exec_option'))

  def __execute_fail_action(self, query, vector):
    try:
      self.execute_query(query, vector.get_value('exec_option'),
                         table_format=vector.get_value('table_format'))
      # Fixed: `assert 'Expected Failure'` asserts a non-empty string, which
      # is always true, so a query that unexpectedly succeeded could never
      # fail this test. AssertionError is not an ImpalaBeeswaxException and
      # therefore propagates past the except clause below, as intended.
      assert False, 'Expected query to fail, but it succeeded'
    except ImpalaBeeswaxException as e:
      LOG.debug(e)

  def __execute_cancel_action(self, query, vector):
    LOG.info('Starting async query execution')
    handle = self.execute_query_async(query, vector.get_value('exec_option'),
                                      table_format=vector.get_value('table_format'))
    LOG.info('Sleeping')
    # Give the query time to reach the injected WAIT before cancelling.
    sleep(3)
    cancel_result = self.client.cancel(handle)
    self.client.close_query(handle)
    assert cancel_result.status_code == 0,\
        'Unexpected status code from cancel request: %s' % cancel_result
| apache-2.0 |
turbomanage/training-data-analyst | courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/werkzeug/utils.py | 7 | 25050 | # -*- coding: utf-8 -*-
"""
werkzeug.utils
~~~~~~~~~~~~~~
This module implements various utilities for WSGI applications. Most of
them are used by the request and response wrappers but especially for
middleware development it makes sense to use them without the wrappers.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import os
import pkgutil
import re
import sys
from ._compat import iteritems
from ._compat import PY2
from ._compat import reraise
from ._compat import string_types
from ._compat import text_type
from ._compat import unichr
from ._internal import _DictAccessorProperty
from ._internal import _missing
from ._internal import _parse_signature
try:
from html.entities import name2codepoint
except ImportError:
from htmlentitydefs import name2codepoint
# Matches $name and ${name} placeholders used by format_string().
_format_re = re.compile(r"\$(?:(%s)|\{(%s)\})" % (("[a-zA-Z_][a-zA-Z0-9_]*",) * 2))
# Matches a single HTML/XML entity reference, e.g. "&amp;".
_entity_re = re.compile(r"&([^;]+);")
# Everything outside [A-Za-z0-9_.-] is stripped by secure_filename().
_filename_ascii_strip_re = re.compile(r"[^A-Za-z0-9_.-]")
# Reserved device filenames on Windows; secure_filename() prefixes these
# with an underscore so they can exist as regular files.
_windows_device_files = (
    "CON",
    "AUX",
    "COM1",
    "COM2",
    "COM3",
    "COM4",
    "LPT1",
    "LPT2",
    "LPT3",
    "PRN",
    "NUL",
)
class cached_property(property):
    """A decorator that converts a function into a lazy property.  The
    function wrapped is called the first time to retrieve the result
    and then that calculated result is used the next time you access
    the value::

        class Foo(object):

            @cached_property
            def foo(self):
                # calculate something important here
                return 42

    The class has to have a `__dict__` in order for this property to
    work.
    """

    # implementation detail: A subclass of python's builtin property
    # decorator, we override __get__ to check for a cached value. If one
    # chooses to invoke __get__ by hand the property will still work as
    # expected because the lookup logic is replicated in __get__ for
    # manual invocation.

    def __init__(self, func, name=None, doc=None):
        # Mirror the wrapped function's metadata so the descriptor looks
        # like the original attribute to introspection tools.
        self.__name__ = name or func.__name__
        self.__module__ = func.__module__
        self.__doc__ = doc or func.__doc__
        self.func = func

    def __set__(self, obj, value):
        # Explicit assignment pre-seeds (or overrides) the cached value.
        obj.__dict__[self.__name__] = value

    def __get__(self, obj, type=None):
        # Accessed on the class rather than an instance: return the
        # descriptor itself, matching normal descriptor behavior.
        if obj is None:
            return self
        # _missing is a sentinel so that a legitimately cached None is
        # distinguishable from "not computed yet".
        value = obj.__dict__.get(self.__name__, _missing)
        if value is _missing:
            # First access: compute once and store on the instance.
            value = self.func(obj)
            obj.__dict__[self.__name__] = value
        return value
class environ_property(_DictAccessorProperty):
    """Maps request attributes to environment variables. This works not only
    for the Werkzeug request object, but also any other class with an
    environ attribute:

    >>> class Test(object):
    ...     environ = {'key': 'value'}
    ...     test = environ_property('key')
    >>> var = Test()
    >>> var.test
    'value'

    If you pass it a second value it's used as default if the key does not
    exist, the third one can be a converter that takes a value and converts
    it.  If it raises :exc:`ValueError` or :exc:`TypeError` the default value
    is used. If no default value is provided `None` is used.

    Per default the property is read only.  You have to explicitly enable it
    by passing ``read_only=False`` to the constructor.
    """

    # Writes raise unless the constructor was given ``read_only=False``.
    read_only = True

    def lookup(self, obj):
        # The mapping this property reads from: the WSGI environ dict.
        return obj.environ
class header_property(_DictAccessorProperty):
    """Like `environ_property` but for headers."""

    def lookup(self, obj):
        # Values are looked up in the object's headers mapping.
        return obj.headers
class HTMLBuilder(object):
    """Helper object for HTML generation.

    Per default there are two instances of that class.  The `html` one, and
    the `xhtml` one for those two dialects.  The class uses keyword parameters
    and positional parameters to generate small snippets of HTML.

    Keyword parameters are converted to XML/SGML attributes, positional
    arguments are used as children.  Because Python accepts positional
    arguments before keyword arguments it's a good idea to use a list with the
    star-syntax for some children:

    >>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ',
    ...                        html.a('bar', href='bar.html')])
    u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>'

    This class works around some browser limitations and can not be used for
    arbitrary SGML/XML generation.  For that purpose lxml and similar
    libraries exist.

    Calling the builder escapes the string passed:

    >>> html.p(html("<foo>"))
    u'<p>&lt;foo&gt;</p>'
    """

    _entity_re = re.compile(r"&([^;]+);")
    # Named entity -> codepoint table, extended with "apos" which is
    # missing from the stdlib htmlentitydefs table.
    _entities = name2codepoint.copy()
    _entities["apos"] = 39
    # Void elements that may be written without a closing tag.
    _empty_elements = {
        "area",
        "base",
        "basefont",
        "br",
        "col",
        "command",
        "embed",
        "frame",
        "hr",
        "img",
        "input",
        "keygen",
        "isindex",
        "link",
        "meta",
        "param",
        "source",
        "wbr",
    }
    # Attributes that are boolean in HTML; rendered without a value in the
    # html dialect and as attr="attr" in xhtml.
    _boolean_attributes = {
        "selected",
        "checked",
        "compact",
        "declare",
        "defer",
        "disabled",
        "ismap",
        "multiple",
        "nohref",
        "noresize",
        "noshade",
        "nowrap",
    }
    # Elements whose text content is escaped rather than emitted verbatim.
    _plaintext_elements = {"textarea"}
    # Elements whose content is wrapped in a CDATA section in xhtml mode.
    _c_like_cdata = {"script", "style"}

    def __init__(self, dialect):
        # Either "html" or "xhtml"; controls void-element and CDATA output.
        self._dialect = dialect

    def __call__(self, s):
        # Calling the builder directly escapes the given string.
        return escape(s)

    def __getattr__(self, tag):
        # Never treat dunder attribute lookups as tag names.
        if tag[:2] == "__":
            raise AttributeError(tag)

        def proxy(*children, **arguments):
            buffer = "<" + tag
            for key, value in iteritems(arguments):
                if value is None:
                    # None means "omit this attribute entirely".
                    continue
                if key[-1] == "_":
                    # A trailing underscore lets callers pass reserved
                    # words such as ``class_``.
                    key = key[:-1]
                if key in self._boolean_attributes:
                    if not value:
                        continue
                    if self._dialect == "xhtml":
                        value = '="' + key + '"'
                    else:
                        value = ""
                else:
                    value = '="' + escape(value) + '"'
                buffer += " " + key + value
            if not children and tag in self._empty_elements:
                # Void element: self-close in xhtml, plain ``>`` in html.
                if self._dialect == "xhtml":
                    buffer += " />"
                else:
                    buffer += ">"
                return buffer
            buffer += ">"

            # None children are skipped; everything else is stringified.
            children_as_string = "".join(
                [text_type(x) for x in children if x is not None]
            )

            if children_as_string:
                if tag in self._plaintext_elements:
                    children_as_string = escape(children_as_string)
                elif tag in self._c_like_cdata and self._dialect == "xhtml":
                    children_as_string = (
                        "/*<![CDATA[*/" + children_as_string + "/*]]>*/"
                    )
            buffer += children_as_string + "</" + tag + ">"
            return buffer

        return proxy

    def __repr__(self):
        return "<%s for %r>" % (self.__class__.__name__, self._dialect)
# Ready-to-use builders for the two supported dialects.
html = HTMLBuilder("html")
xhtml = HTMLBuilder("xhtml")
# https://cgit.freedesktop.org/xdg/shared-mime-info/tree/freedesktop.org.xml.in
# https://www.iana.org/assignments/media-types/media-types.xhtml

# Types listed in the XDG mime info that have a charset in the IANA
# registration. Consulted by get_content_type() in addition to text/*
# and */*+xml types.
_charset_mimetypes = {
    "application/ecmascript",
    "application/javascript",
    "application/sql",
    "application/xml",
    "application/xml-dtd",
    "application/xml-external-parsed-entity",
}
def get_content_type(mimetype, charset):
    """Returns the full content type string with charset for a mimetype.

    If the mimetype represents text, the charset parameter will be
    appended, otherwise the mimetype is returned unchanged.

    :param mimetype: The mimetype to be used as content type.
    :param charset: The charset to be appended for text mimetypes.
    :return: The content type.

    .. versionchanged:: 0.15
        Any type that ends with ``+xml`` gets a charset, not just those
        that start with ``application/``. Known text types such as
        ``application/javascript`` are also given charsets.
    """
    # Text types come in three forms: text/*, a known charset-bearing
    # application type, or any */*+xml type.
    if (
        mimetype.startswith("text/")
        or mimetype in _charset_mimetypes
        or mimetype.endswith("+xml")
    ):
        mimetype += "; charset=" + charset

    return mimetype
def detect_utf_encoding(data):
    """Detect which UTF encoding was used to encode the given bytes.

    The latest JSON standard (:rfc:`8259`) suggests that only UTF-8 is
    accepted. Older documents allowed 8, 16, or 32. 16 and 32 can be big
    or little endian. Some editors or libraries may prepend a BOM.

    :internal:

    :param data: Bytes in unknown UTF encoding.
    :return: UTF encoding name

    .. versionadded:: 0.15
    """
    prefix = data[:4]

    # An explicit UTF-8 BOM wins over everything else.
    if prefix[:3] == codecs.BOM_UTF8:
        return "utf-8-sig"

    # ASCII-range characters never produce null bytes in UTF-8, so a
    # prefix without nulls is treated as UTF-8.
    if b"\x00" not in prefix:
        return "utf-8"

    # Byte order marks for the wider encodings.
    if prefix in (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE):
        return "utf-32"

    if prefix[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE):
        return "utf-16"

    # No BOM: infer width and endianness from where the null bytes of the
    # first ASCII code point fall.
    if len(prefix) == 4:
        if prefix[:3] == b"\x00\x00\x00":
            return "utf-32-be"

        if prefix[::2] == b"\x00\x00":
            return "utf-16-be"

        if prefix[1:] == b"\x00\x00\x00":
            return "utf-32-le"

        if prefix[1::2] == b"\x00\x00":
            return "utf-16-le"

    # Exactly two bytes: a single UTF-16 code unit.
    if len(prefix) == 2:
        return "utf-16-be" if prefix.startswith(b"\x00") else "utf-16-le"

    return "utf-8"
def format_string(string, context):
    """String-template format a string:

    >>> format_string('$foo and ${foo}s', dict(foo=42))
    '42 and 42s'

    This does not do any attribute lookup etc.  For more advanced string
    formattings have a look at the `werkzeug.template` module.

    :param string: the format string.
    :param context: a dict with the variables to insert.
    """

    def lookup_arg(match):
        # Group 1 matches ``$name``, group 2 matches ``${name}``; exactly
        # one of them is set for any given match.
        x = context[match.group(1) or match.group(2)]
        if not isinstance(x, string_types):
            # Coerce the value to the same string type as the template so
            # str and unicode are never mixed on Python 2.
            x = type(string)(x)
        return x

    return _format_re.sub(lookup_arg, string)
def secure_filename(filename):
    r"""Pass it a filename and it will return a secure version of it.  This
    filename can then safely be stored on a regular file system and passed
    to :func:`os.path.join`.  The filename returned is an ASCII only string
    for maximum portability.

    On windows systems the function also makes sure that the file is not
    named after one of the special device files.

    >>> secure_filename("My cool movie.mov")
    'My_cool_movie.mov'
    >>> secure_filename("../../../etc/passwd")
    'etc_passwd'
    >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
    'i_contain_cool_umlauts.txt'

    The function might return an empty filename.  It's your responsibility
    to ensure that the filename is unique and that you abort or
    generate a random filename if the function returned an empty one.

    .. versionadded:: 0.5

    :param filename: the filename to secure
    """
    if isinstance(filename, text_type):
        from unicodedata import normalize

        # Transliterate to an ASCII approximation; characters without an
        # ASCII equivalent are dropped by the "ignore" error handler.
        filename = normalize("NFKD", filename).encode("ascii", "ignore")
        if not PY2:
            filename = filename.decode("ascii")
    # Neutralize both path separators that may exist on this platform.
    for sep in os.path.sep, os.path.altsep:
        if sep:
            filename = filename.replace(sep, " ")
    # Collapse runs of whitespace to underscores, drop every remaining
    # character outside [A-Za-z0-9_.-], and strip edge dots/underscores.
    filename = str(_filename_ascii_strip_re.sub("", "_".join(filename.split()))).strip(
        "._"
    )

    # on nt a couple of special files are present in each folder.  We
    # have to ensure that the target file is not such a filename.  In
    # this case we prepend an underline
    if (
        os.name == "nt"
        and filename
        and filename.split(".")[0].upper() in _windows_device_files
    ):
        filename = "_" + filename

    return filename
def escape(s, quote=None):
    """Replace special characters "&", "<", ">" and (") to HTML-safe sequences.

    There is a special handling for `None` which escapes to an empty string.

    .. versionchanged:: 0.9
       `quote` is now implicitly on.

    :param s: the string to escape.
    :param quote: ignored.
    """
    if s is None:
        return ""

    # Objects providing __html__ are trusted to return safe markup.
    if hasattr(s, "__html__"):
        return text_type(s.__html__())

    if not isinstance(s, string_types):
        s = text_type(s)

    if quote is not None:
        from warnings import warn

        warn(
            "The 'quote' parameter is no longer used as of version 0.9"
            " and will be removed in version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )

    # "&" must be replaced first so the other substitutions are not
    # themselves re-escaped.
    for char, entity in (
        ("&", "&amp;"),
        ("<", "&lt;"),
        (">", "&gt;"),
        ('"', "&quot;"),
    ):
        s = s.replace(char, entity)

    return s
def unescape(s):
    """The reverse function of `escape`.  This unescapes all the HTML
    entities, not only the XML entities inserted by `escape`.

    :param s: the string to unescape.
    """

    def handle_match(m):
        name = m.group(1)
        # Named entities (e.g. ``&amp;``) come from the shared table on
        # HTMLBuilder, which extends the stdlib table with ``apos``.
        if name in HTMLBuilder._entities:
            return unichr(HTMLBuilder._entities[name])
        try:
            # Numeric character references: ``&#x..;`` (hex) or ``&#..;``.
            if name[:2] in ("#x", "#X"):
                return unichr(int(name[2:], 16))
            elif name.startswith("#"):
                return unichr(int(name[1:]))
        except ValueError:
            # Malformed numeric reference: fall through and drop it.
            pass
        # Unknown entities are replaced with an empty string.
        return u""

    return _entity_re.sub(handle_match, s)
def redirect(location, code=302, Response=None):
    """Returns a response object (a WSGI application) that, if called,
    redirects the client to the target location. Supported codes are
    301, 302, 303, 305, 307, and 308. 300 is not supported because
    it's not a real redirect and 304 because it's the answer for a
    request with a request with defined If-Modified-Since headers.

    .. versionadded:: 0.6
       The location can now be a unicode string that is encoded using
       the :func:`iri_to_uri` function.

    .. versionadded:: 0.10
        The class used for the Response object can now be passed in.

    :param location: the location the response should redirect to.
    :param code: the redirect status code. defaults to 302.
    :param class Response: a Response class to use when instantiating a
        response. The default is :class:`werkzeug.wrappers.Response` if
        unspecified.
    """
    if Response is None:
        # Imported here to avoid a circular import at module load time.
        from .wrappers import Response

    # The escaped original is shown in the HTML body; the (possibly
    # URI-encoded) location goes into the Location header below.
    display_location = escape(location)
    if isinstance(location, text_type):
        # Safe conversion is necessary here as we might redirect
        # to a broken URI scheme (for instance itms-services).
        from .urls import iri_to_uri

        location = iri_to_uri(location, safe_conversion=True)
    response = Response(
        '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
        "<title>Redirecting...</title>\n"
        "<h1>Redirecting...</h1>\n"
        "<p>You should be redirected automatically to target URL: "
        '<a href="%s">%s</a>.  If not click the link.'
        % (escape(location), display_location),
        code,
        mimetype="text/html",
    )
    response.headers["Location"] = location
    return response
def append_slash_redirect(environ, code=301):
    """Redirects to the same URL but with a slash appended.  The behavior
    of this function is undefined if the path ends with a slash already.

    :param environ: the WSGI environment for the request that triggers
                    the redirect.
    :param code: the status code for the redirect.
    """
    new_path = environ["PATH_INFO"].strip("/") + "/"
    query_string = environ.get("QUERY_STRING")
    if query_string:
        # Preserve the original query string on the redirect target.
        new_path += "?" + query_string
    return redirect(new_path, code)
def import_string(import_name, silent=False):
    """Imports an object based on a string.  This is useful if you want to
    use import paths as endpoints or something similar.  An import path can
    be specified either in dotted notation (``xml.sax.saxutils.escape``)
    or with a colon as object delimiter (``xml.sax.saxutils:escape``).

    If `silent` is True the return value will be `None` if the import fails.

    :param import_name: the dotted name for the object to import.
    :param silent: if set to `True` import errors are ignored and
                   `None` is returned instead.
    :return: imported object
    """
    # force the import name to automatically convert to strings
    # __import__ is not able to handle unicode strings in the fromlist
    # if the module is a package
    import_name = str(import_name).replace(":", ".")

    try:
        try:
            __import__(import_name)
        except ImportError:
            # A single-segment name that fails to import is a real error.
            # Otherwise the last segment may name an attribute of the
            # parent module rather than a submodule; fall through and try
            # an attribute lookup instead.
            if "." not in import_name:
                raise
        else:
            # The whole dotted path imported cleanly as a module.
            return sys.modules[import_name]

        parent_name, attr_name = import_name.rsplit(".", 1)
        parent = __import__(parent_name, globals(), locals(), [attr_name])
        try:
            return getattr(parent, attr_name)
        except AttributeError as exc:
            # Normalize to ImportError so the outer handler applies.
            raise ImportError(exc)
    except ImportError as exc:
        if not silent:
            # Re-raise as the richer ImportStringError, keeping the
            # original traceback on Python 2 and 3.
            reraise(
                ImportStringError, ImportStringError(import_name, exc), sys.exc_info()[2]
            )
def find_modules(import_path, include_packages=False, recursive=False):
    """Finds all the modules below a package.  This can be useful to
    automatically import all views / controllers so that their metaclasses /
    function decorators have a chance to register themselves on the
    application.

    Packages are not returned unless `include_packages` is `True`.  This can
    also recursively list modules but in that case it will import all the
    packages to get the correct load path of that module.

    :param import_path: the dotted name for the package to find child modules.
    :param include_packages: set to `True` if packages should be returned, too.
    :param recursive: set to `True` if recursion should happen.
    :return: generator
    """
    module = import_string(import_path)
    path = getattr(module, "__path__", None)
    if path is None:
        # Only packages define __path__; plain modules have no children.
        raise ValueError("%r is not a package" % import_path)
    basename = module.__name__ + "."
    for _importer, modname, ispkg in pkgutil.iter_modules(path):
        modname = basename + modname
        if ispkg:
            if include_packages:
                yield modname
            if recursive:
                # Descend into the subpackage, forwarding the flags.
                for item in find_modules(modname, include_packages, True):
                    yield item
        else:
            yield modname
def validate_arguments(func, args, kwargs, drop_extra=True):
    """Checks if the function accepts the arguments and keyword arguments.
    Returns a new ``(args, kwargs)`` tuple that can safely be passed to
    the function without causing a `TypeError` because the function signature
    is incompatible.  If `drop_extra` is set to `True` (which is the default)
    any extra positional or keyword arguments are dropped automatically.

    The exception raised provides three attributes:

    `missing`
        A set of argument names that the function expected but where
        missing.

    `extra`
        A dict of keyword arguments that the function can not handle but
        where provided.

    `extra_positional`
        A list of values that where given by positional argument but the
        function cannot accept.

    This can be useful for decorators that forward user submitted data to
    a view function::

        from werkzeug.utils import ArgumentValidationError, validate_arguments

        def sanitize(f):
            def proxy(request):
                data = request.values.to_dict()
                try:
                    args, kwargs = validate_arguments(f, (request,), data)
                except ArgumentValidationError:
                    raise BadRequest('The browser failed to transmit all '
                                     'the data expected.')
                return f(*args, **kwargs)
            return proxy

    :param func: the function the validation is performed against.
    :param args: a tuple of positional arguments.
    :param kwargs: a dict of keyword arguments.
    :param drop_extra: set to `False` if you don't want extra arguments
                       to be silently dropped.
    :return: tuple in the form ``(args, kwargs)``.
    """
    parser = _parse_signature(func)
    # The signature parser returns more fields; only the first five matter.
    args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5]
    if missing:
        raise ArgumentValidationError(tuple(missing))
    elif (extra or extra_positional) and not drop_extra:
        # Extra arguments are only an error under strict checking;
        # otherwise they are silently dropped.
        raise ArgumentValidationError(None, extra, extra_positional)
    return tuple(args), kwargs
def bind_arguments(func, args, kwargs):
    """Bind the arguments provided into a dict.  When passed a function,
    a tuple of arguments and a dict of keyword arguments `bind_arguments`
    returns a dict of names as the function would see it.  This can be useful
    to implement a cache decorator that uses the function arguments to build
    the cache key based on the values of the arguments.

    :param func: the function the arguments should be bound for.
    :param args: tuple of positional arguments.
    :param kwargs: a dict of keyword arguments.
    :return: a :class:`dict` of bound keyword arguments.
    """
    (
        args,
        kwargs,
        missing,
        extra,
        extra_positional,
        arg_spec,
        vararg_var,
        kwarg_var,
    ) = _parse_signature(func)(args, kwargs)
    values = {}
    # Named parameters bind to positional values in declaration order.
    for (name, _has_default, _default), value in zip(arg_spec, args):
        values[name] = value
    if vararg_var is not None:
        values[vararg_var] = tuple(extra_positional)
    elif extra_positional:
        raise TypeError("too many positional arguments")
    if kwarg_var is not None:
        # A keyword that also matched a named parameter is ambiguous.
        multikw = set(extra) & set([x[0] for x in arg_spec])
        if multikw:
            raise TypeError(
                "got multiple values for keyword argument " + repr(next(iter(multikw)))
            )
        values[kwarg_var] = extra
    elif extra:
        raise TypeError("got unexpected keyword argument " + repr(next(iter(extra))))
    return values
class ArgumentValidationError(ValueError):
"""Raised if :func:`validate_arguments` fails to validate"""
def __init__(self, missing=None, extra=None, extra_positional=None):
self.missing = set(missing or ())
self.extra = extra or {}
self.extra_positional = extra_positional or []
ValueError.__init__(
self,
"function arguments invalid. (%d missing, %d additional)"
% (len(self.missing), len(self.extra) + len(self.extra_positional)),
)
class ImportStringError(ImportError):
    """Provides information about a failed :func:`import_string` attempt."""

    #: String in dotted notation that failed to be imported.
    import_name = None
    #: Wrapped exception.
    exception = None

    def __init__(self, import_name, exception):
        self.import_name = import_name
        self.exception = exception

        msg = (
            "import_string() failed for %r. Possible reasons are:\n\n"
            "- missing __init__.py in a package;\n"
            "- package or module path not included in sys.path;\n"
            "- duplicated package or module name taking precedence in "
            "sys.path;\n"
            "- missing module, class, function or variable;\n\n"
            "Debugged import:\n\n%s\n\n"
            "Original exception:\n\n%s: %s"
        )

        name = ""
        tracked = []
        # Re-import each dotted prefix in turn to pinpoint exactly where
        # resolution breaks, recording where each working prefix lives.
        for part in import_name.replace(":", ".").split("."):
            name += (name and ".") + part
            imported = import_string(name, silent=True)
            if imported:
                tracked.append((name, getattr(imported, "__file__", None)))
            else:
                # First prefix that fails: build the debug trail and stop.
                track = ["- %r found in %r." % (n, i) for n, i in tracked]
                track.append("- %r not found." % name)
                msg = msg % (
                    import_name,
                    "\n".join(track),
                    exception.__class__.__name__,
                    str(exception),
                )
                break

        ImportError.__init__(self, msg)

    def __repr__(self):
        return "<%s(%r, %r)>" % (
            self.__class__.__name__,
            self.import_name,
            self.exception,
        )
# Lazily re-export names that historically lived in this module but have
# moved elsewhere in Werkzeug; accessing them goes through the deprecation
# shim, which warns and forwards to the new location.
from werkzeug import _DeprecatedImportModule

_DeprecatedImportModule(
    __name__,
    {
        ".datastructures": [
            "CombinedMultiDict",
            "EnvironHeaders",
            "Headers",
            "MultiDict",
        ],
        ".http": ["dump_cookie", "parse_cookie"],
    },
    "Werkzeug 1.0",
)
# The shim registers itself via side effects; delete the name so it does
# not leak into this module's public namespace.
del _DeprecatedImportModule
| apache-2.0 |
tlatzko/spmcluster | .tox/2.7-nocov/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langbulgarianmodel.py | 2965 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified base on win1251BulgarianCharToOrderMap, so
# only number <64 is sure valid
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences:3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
# Detection model for Bulgarian text in the ISO-8859-5 (Latin/Cyrillic)
# encoding; consumed by the chardet single-byte charset prober.
Latin5BulgarianModel = {
    'charToOrderMap': Latin5_BulgarianCharToOrderMap,
    'precedenceMatrix': BulgarianLangModel,
    # Fraction of the training corpus covered by the frequent sequences.
    'mTypicalPositiveRatio': 0.969392,
    'keepEnglishLetter': False,
    'charsetName': "ISO-8859-5"
}
# Detection model for Bulgarian text in the windows-1251 encoding; shares
# the sequence-precedence matrix with the Latin-5 model above.
Win1251BulgarianModel = {
    'charToOrderMap': win1251BulgarianCharToOrderMap,
    'precedenceMatrix': BulgarianLangModel,
    # Fraction of the training corpus covered by the frequent sequences.
    'mTypicalPositiveRatio': 0.969392,
    'keepEnglishLetter': False,
    'charsetName': "windows-1251"
}
# flake8: noqa
| bsd-2-clause |
nauer/BI-Army-Knife | src/linegrep.py | 1 | 9553 | #! /usr/bin/env python3
# encoding: utf-8
'''
linegrep -- grep lines from stdoutput or file and extract matching groups to csv
linegrep is a description
It defines classes_and_methods
@author: Norbert Auer
@copyright: 2014 University of Natural Resources and Life Sciences, Vienna. All rights reserved.
@license: license
@contact: norbert.auer@boku.ac.at
@deffield updated: Updated
'''
import sys
import os
import re
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
from argparse import FileType
from operator import itemgetter
__all__ = []
__version__ = 1.4
__date__ = '2014-06-04'      # first release date
__updated__ = '2015-06-23'   # date of last modification

# Developer switches: when the script is run directly, DEBUG forges a test
# command line, TESTRUN executes doctests, PROFILE runs under cProfile.
DEBUG = 0
TESTRUN = 0
PROFILE = 0
class CLIError(Exception):
    '''Generic exception to raise and log different fatal errors.

    The message is prefixed with "E: " and returned verbatim by
    :meth:`__str__`.
    '''

    def __init__(self, msg):
        # The original template called ``super(CLIError).__init__(type(self))``,
        # which builds an *unbound* super proxy and never initializes the
        # Exception base class at all.  Initialize it properly with the
        # formatted message so ``e.args`` is meaningful as well.
        formatted = "E: %s" % msg
        super(CLIError, self).__init__(formatted)
        self.msg = formatted

    def __str__(self):
        return self.msg

    def __unicode__(self):
        # Python 2 leftover; harmless under Python 3.
        return self.msg
def start(args):
    """Run the grep: read ``args.file`` line by line, apply the regex
    patterns and write the extracted capture groups as delimited columns.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command line options (see :func:`main`).  Reads ``pattern``,
        ``opt_pattern``, ``split``, ``file``, ``output``, ``fr``/``to``
        (1-based inclusive line window), ``unmatch``, ``group``, ``sort``,
        ``header`` and ``delimiter``.
    """
    if(DEBUG):
        print(args.pattern)

    # Mandatory patterns: a line must match (and capture from) all of them.
    pattern = [re.compile(p) for p in args.pattern]

    # Optional patterns: a non-matching one contributes an empty column.
    opt_pattern = []
    if args.opt_pattern is not None:
        opt_pattern = [re.compile(P) for P in args.opt_pattern]

    if args.output:
        f = args.output
    else:
        f = sys.stdout

    # ``i`` is the 1-based number of the last line read.  It must start at
    # 0 (not 1) so that ``--to N`` really includes line N; with ``--from``
    # it starts at fr-1 after skipping the first fr-1 lines.  (The previous
    # initialisation to 1 made ``--to`` stop one line early whenever
    # ``--from`` was not given.)
    i = 0
    if args.fr:
        i = args.fr - 1
        for _ in range(args.fr - 1):
            args.file.readline()

    results = []

    for line in args.file:
        i = i + 1

        if args.to is not None and args.to < i:
            break

        groups = ()

        # Process mandatory patterns.
        for p in pattern:
            rematch = p.search(line)
            if rematch:
                if len(rematch.groups()) == 0:
                    # A pattern without capture groups yields no columns,
                    # so the line counts as unmatched.
                    groups = None
                    break
                groups = groups + rematch.groups()
            else:
                groups = None
                break

        # Line is unmatched unless all mandatory patterns matched.
        if groups is None:
            if args.unmatch:
                args.unmatch[0].write(line)
            continue
        else:
            # Process optional patterns.
            for P in opt_pattern:
                rematch = P.search(line)
                if rematch:
                    if len(rematch.groups()) == 0:
                        groups = groups + ("",)
                    groups = groups + rematch.groups()
                else:
                    groups = groups + ("",)

            # Process split: the first regex selects the region, the second
            # one splits it; each fragment becomes its own result row.
            splitres = None
            if args.split:
                split1 = re.compile(args.split[0])
                split2 = re.compile(args.split[1])
                rematch = split1.search(line)
                if rematch:
                    if len(rematch.groups()) > 0:
                        splitres = split2.split("".join(list(rematch.groups())))

            if splitres is not None:
                for s in splitres:
                    results.append(groups + (s,))
            else:
                if len(groups) > 0:
                    results.append(groups)
                else:
                    if args.unmatch:
                        args.unmatch[0].write(line)

    if len(results) > 0:
        # Group and count: collapse identical rows, append the multiplicity.
        if args.group:
            results = [l + (results.count(l),) for l in set(results)]

        # Sort.
        if args.sort is not None:
            # The first -s value is the main sort key, therefore sort last.
            results.reverse()
            for s in args.sort:
                if abs(s) > len(results[0]):
                    print("WARNING: -s {0} is ignored. Result has only {1} columns.".format(s, len(results[0])), file=sys.stderr)
                elif abs(s) == 0:
                    print("WARNING: -s {0} is ignored. Columns start with index 1.".format(s, len(results[0])), file=sys.stderr)
                else:
                    results = sorted(results, key=itemgetter(abs(s) - 1), reverse=s < 0)

    # Add header.
    if args.header:
        f.write(args.header + '\n')

    # Write results.
    for line in results:
        f.write(args.delimiter.join(map(str, line)) + '\n')

    # Only close a file we opened via --output; closing sys.stdout (the
    # previous behavior) would break any subsequent writes to it.
    if args.output:
        f.close()
def main(argv=None):  # IGNORE:C0111
    '''Command line options.

    Builds the argparse CLI, parses ``argv`` (appended to ``sys.argv`` when
    given) and hands the resulting namespace to :func:`start`.  Returns a
    process exit code: 0 on success or interrupt, 2 on error.
    '''

    if argv is None:
        argv = sys.argv
    else:
        # Allow callers to supply extra arguments programmatically.
        sys.argv.extend(argv)

    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
    # The second line of this module's docstring is used as the short
    # description shown in --help output.
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s

Created by Norbert Auer on %s.
Copyright 2014 University of Natural Resources and Life Sciences, Vienna. All rights reserved.

Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0

Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.

USAGE
''' % (program_shortdesc, str(__date__))

    try:
        # Setup argument parser
        parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
        #parser.add_argument("-v", "--verbose", dest="verbose", action="count", help="set verbosity level [default: %(default)s]")
        parser.add_argument('-V', '--version', action='version', version=program_version_message)
        parser.add_argument('-H', '--header', nargs='?', help="Add header line to output. To add tabs use Ctrl+v+tab in the bash.", type=str)
        parser.add_argument('-p', '--pattern', nargs='+', help="Grep pattern.", type=str, default=['.*'])
        parser.add_argument('-P', '--opt-pattern', nargs='*', help="Optional grep pattern. Count line also if pattern is not found.", type=str)
        parser.add_argument('-r', '--split', nargs=2, help="Split pattern. First pattern for sequence to split. Second pattern for split.", type=str)
        parser.add_argument('file', nargs='?', type=FileType('r'), default='-', help="File to grep. Leave empty or use '-' to read from Stdin.")
        parser.add_argument('-d', '--delimiter', help='Set the delimiter for the output', type=str, default='\t')
        parser.add_argument('-f', '--from', dest='fr', help='Skip N-1 lines from begin of file. Use also the --to option to limit input', type=int)
        parser.add_argument('-t', '--to', help='Read only to this line. All other lines are skipped.', type=int)
        parser.add_argument('-o', '--output', help='Use output file instead of stdout', type=FileType('w'))
        parser.add_argument('-g', '--group', help='Instead of normal input identical lines are grouped together and an additional column is added with the group count.', action='store_true')
        parser.add_argument('-s', '--sort', nargs='+', help='Set columns for sorting. Use + or - to set descending or ascending order i.e -s -2 3 for sorting column 2 in descending order and than column 3 in ascending order.', type=int)
        parser.add_argument('-u', '--unmatch', nargs=1, type=FileType('w'), help="Write unmatched lines into file.")

        # Process arguments
        args = parser.parse_args()

        if DEBUG:
            print(args)

        start(args)

        #if verbose > 0:
        #    print("Verbose mode on")
        return 0
    except KeyboardInterrupt:
        ### handle keyboard interrupt ###
        return 0
    except Exception as e:
        # In developer modes re-raise so the traceback is visible.
        if DEBUG or TESTRUN:
            raise(e)
        indent = len(program_name) * " "
        sys.stderr.write(program_name + ": " + repr(e) + "\n")
        sys.stderr.write(indent + " for help use --help")
        return 2
if __name__ == "__main__":
    if DEBUG:
        # Developer convenience: forge a command line for interactive runs
        # against the bundled test data.  Commented lines are alternative
        # flag combinations kept for quick experimentation.
        #sys.argv.append("-h")
        sys.argv.append("-d")
        sys.argv.append("\t")
        #sys.argv.append("-g")
        #sys.argv.append("-f")
        #sys.argv.append("5")
        #sys.argv.append("-t")
        #sys.argv.append("6")
        #sys.argv.append("-s")
        #sys.argv.append("1")
        #sys.argv.append("-2")
        #sys.argv.append("-0")
        #sys.argv.append("2")
        #sys.argv.append("0")
        #sys.argv.append("-u")
        #sys.argv.append("../test/unmatch.output")
        sys.argv.append("-p")
        sys.argv.append("EMORG:(AF3[^\s]*)")
        sys.argv.append("-P")
        sys.argv.append("(84\.)")
        #sys.argv.append("(\d+\.\d*)")
        #sys.argv.append("-p")
        #sys.argv.append("(ORG:)")
        sys.argv.append("-r")
        sys.argv.append("\s(\d*\.\d*\s+(?:\d+\s+){2})")
        sys.argv.append("\s+")
        sys.argv.append("--")
        sys.argv.append("../test/test.blast")
    if TESTRUN:
        # Run embedded doctests instead of the normal CLI path.
        import doctest
        doctest.testmod()
    if PROFILE:
        # Profile a full main() run and dump sorted stats to a file.
        import cProfile
        import pstats
        profile_filename = 'linegrep_profile.txt'
        cProfile.run('main()', profile_filename)
        statsfile = open("profile_stats.txt", "wb")
        p = pstats.Stats(profile_filename, stream=statsfile)
        stats = p.strip_dirs().sort_stats('cumulative')
        stats.print_stats()
        statsfile.close()
        sys.exit(0)
    sys.exit(main())
| apache-2.0 |
getavalon/core | avalon/vendor/qtawesome/iconic_font.py | 3 | 9595 | """Classes handling iconic fonts"""
from __future__ import print_function
import json
import os
from .. import six
from ..Qt import QtCore, QtGui
# Fallback options applied to every icon unless overridden globally via
# ``set_global_defaults`` or per call through keyword arguments.
_default_options = {
    'color': QtGui.QColor(50, 50, 50),
    'color_disabled': QtGui.QColor(150, 150, 150),
    'opacity': 1.0,
    'scale_factor': 1.0,
}
def set_global_defaults(**kwargs):
    """Set global defaults for all icons.

    Values are merged into the module-wide ``_default_options`` mapping and
    therefore affect every icon created afterwards.

    Raises
    ------
    KeyError
        If a keyword is not a recognized icon option.
    """
    # 'opacity' is included here because it ships in ``_default_options``
    # and is honored by the painter (options.get('opacity', 1.0)); the
    # previous list omitted it, so setting a global opacity raised KeyError.
    valid_options = ['active', 'animation', 'color', 'color_active',
                     'color_disabled', 'color_selected', 'disabled', 'offset',
                     'opacity', 'scale_factor', 'selected']
    for kw in kwargs:
        if kw in valid_options:
            _default_options[kw] = kwargs[kw]
        else:
            error = "Invalid option '{0}'".format(kw)
            raise KeyError(error)
class CharIconPainter:
    """Char icon painter"""

    def paint(self, iconic, painter, rect, mode, state, options):
        """Main paint method"""
        # ``options`` is a list with one dict per stacked glyph; each is
        # drawn on top of the previous one.
        for opt in options:
            self._paint_icon(iconic, painter, rect, mode, state, opt)

    def _paint_icon(self, iconic, painter, rect, mode, state, options):
        """Paint a single icon"""
        painter.save()
        color, char = options['color'], options['char']

        # State-specific overrides fall back to the base color/character.
        if mode == QtGui.QIcon.Disabled:
            color = options.get('color_disabled', color)
            char = options.get('disabled', char)
        elif mode == QtGui.QIcon.Active:
            color = options.get('color_active', color)
            char = options.get('active', char)
        elif mode == QtGui.QIcon.Selected:
            color = options.get('color_selected', color)
            char = options.get('selected', char)

        painter.setPen(QtGui.QColor(color))

        # A 16 pixel-high icon yields a font size of 14, which is pixel perfect
        # for font-awesome. 16 * 0.875 = 14
        # The reason for not using full-sized glyphs is the negative bearing of
        # fonts.
        draw_size = 0.875 * round(rect.height() * options['scale_factor'])
        prefix = options['prefix']

        # Animation setup hook
        animation = options.get('animation')
        if animation is not None:
            animation.setup(self, painter, rect)

        painter.setFont(iconic.font(prefix, draw_size))
        if 'offset' in options:
            # Offsets are interpreted as fractions of the icon rect.
            rect = QtCore.QRect(rect)
            rect.translate(options['offset'][0] * rect.width(),
                           options['offset'][1] * rect.height())

        painter.setOpacity(options.get('opacity', 1.0))

        painter.drawText(rect,
                         QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter,
                         char)
        painter.restore()
class CharIconEngine(QtGui.QIconEngine):
    """Specialization of QtGui.QIconEngine used to draw font-based icons"""

    def __init__(self, iconic, painter, options):
        super(CharIconEngine, self).__init__()
        self.iconic = iconic    # owning IconicFont (provides font lookup)
        self.painter = painter  # CharIconPainter doing the actual drawing
        self.options = options  # list of per-glyph option dicts

    def paint(self, painter, rect, mode, state):
        # Delegate rendering to the configured painter.
        self.painter.paint(
            self.iconic, painter, rect, mode, state, self.options)

    def pixmap(self, size, mode, state):
        # Render the icon onto a transparent pixmap of the requested size.
        pm = QtGui.QPixmap(size)
        pm.fill(QtCore.Qt.transparent)
        self.paint(QtGui.QPainter(pm),
                   QtCore.QRect(QtCore.QPoint(0, 0), size),
                   mode,
                   state)
        return pm
class IconicFont(QtCore.QObject):
    """Main class for managing iconic fonts"""

    def __init__(self, *args):
        """Constructor

        :param *args: tuples
            Each positional argument is a tuple of 3 or 4 values
            - The prefix string to be used when accessing a given font set
            - The ttf font filename
            - The json charmap filename
            - Optionally, the directory containing these files. When not
              provided, the files will be looked up in ./fonts/
        """
        super(IconicFont, self).__init__()
        self.painter = CharIconPainter()
        # Custom painters registered through ``set_custom_icon``, by name.
        self.painters = {}
        # prefix -> font family name as registered in the Qt font database.
        self.fontname = {}
        # prefix -> {icon name: unicode character}.
        self.charmap = {}
        for fargs in args:
            self.load_font(*fargs)

    def load_font(self,
                  prefix,
                  ttf_filename,
                  charmap_filename,
                  directory=None):
        """Loads a font file and the associated charmap

        If `directory` is None, the files will be looked up in ./fonts/

        Arguments
        ---------
        prefix: str
            prefix string to be used when accessing a given font set
        ttf_filename: str
            ttf font filename
        charmap_filename: str
            charmap filename
        directory: str or None, optional
            directory for font and charmap files
        """

        def hook(obj):
            # The JSON charmap stores code points as hex strings; convert
            # each entry to the actual unicode character.
            result = {}
            for key in obj:
                result[key] = six.unichr(int(obj[key], 16))
            return result

        if directory is None:
            directory = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), 'fonts')

        with open(os.path.join(directory, charmap_filename), 'r') as codes:
            self.charmap[prefix] = json.load(codes, object_hook=hook)

        id_ = QtGui.QFontDatabase.addApplicationFont(
            os.path.join(directory, ttf_filename))

        loadedFontFamilies = QtGui.QFontDatabase.applicationFontFamilies(id_)

        if(loadedFontFamilies):
            self.fontname[prefix] = loadedFontFamilies[0]
        else:
            print('Font is empty')

    def icon(self, *names, **kwargs):
        """Returns a QtGui.QIcon object corresponding to the provided icon name
        (including prefix)

        Arguments
        ---------
        names: list of str
            icon name, of the form PREFIX.NAME

        options: dict
            options to be passed to the icon painter
        """
        # One options dict per requested name; default to empty dicts.
        options_list = kwargs.pop('options', [{}] * len(names))
        general_options = kwargs

        if len(options_list) != len(names):
            error = '"options" must be a list of size {0}'.format(len(names))
            raise Exception(error)

        parsed_options = []
        for i in range(len(options_list)):
            specific_options = options_list[i]
            parsed_options.append(self._parse_options(specific_options,
                                                      general_options,
                                                      names[i]))

        # Process high level API
        api_options = parsed_options

        return self._icon_by_painter(self.painter, api_options)

    def _parse_options(self, specific_options, general_options, name):
        """Merge global defaults, call-wide and per-icon options for *name*."""
        options = dict(_default_options, **general_options)
        options.update(specific_options)

        # Handle icons for states: each state may name its own glyph,
        # falling back to the base icon name.
        icon_kw = ['disabled', 'active', 'selected', 'char']
        names = [options.get(kw, name) for kw in icon_kw]
        prefix, chars = self._get_prefix_chars(names)
        options.update(dict(zip(*(icon_kw, chars))))
        options.update({'prefix': prefix})

        # Handle colors for states: default to the base color.
        color_kw = ['color_active', 'color_selected']
        colors = [options.get(kw, options['color']) for kw in color_kw]
        options.update(dict(zip(*(color_kw, colors))))
        return options

    def _get_prefix_chars(self, names):
        """Resolve ``PREFIX.NAME`` strings to (prefix, list of characters)."""
        chars = []
        for name in names:
            if '.' in name:
                prefix, n = name.split('.')
                if prefix in self.charmap:
                    if n in self.charmap[prefix]:
                        chars.append(self.charmap[prefix][n])
                    else:
                        error = 'Invalid icon name "{0}" in font "{1}"'.format(
                            n, prefix)
                        raise Exception(error)
                else:
                    error = 'Invalid font prefix "{0}"'.format(prefix)
                    raise Exception(error)
            else:
                raise Exception('Invalid icon name')
        # NOTE(review): the returned prefix is the one from the last name
        # processed; all names are assumed to share a single font prefix.
        return prefix, chars

    def font(self, prefix, size):
        """Returns QtGui.QFont corresponding to the given prefix and size

        Arguments
        ---------
        prefix: str
            prefix string of the loaded font
        size: int
            size for the font
        """
        font = QtGui.QFont(self.fontname[prefix])
        font.setPixelSize(size)
        return font

    def set_custom_icon(self, name, painter):
        """Associates a user-provided CharIconPainter to an icon name

        The custom icon can later be addressed by calling
        icon('custom.NAME') where NAME is the provided name for that icon.

        Arguments
        ---------
        name: str
            name of the custom icon
        painter: CharIconPainter
            The icon painter, implementing
            `paint(self, iconic, painter, rect, mode, state, options)`
        """
        self.painters[name] = painter

    def _custom_icon(self, name, **kwargs):
        """Returns the custom icon corresponding to the given name"""
        options = dict(_default_options, **kwargs)
        if name in self.painters:
            painter = self.painters[name]
            return self._icon_by_painter(painter, options)
        else:
            # Unknown custom name: fall back to a null icon.
            return QtGui.QIcon()

    def _icon_by_painter(self, painter, options):
        """Returns the icon corresponding to the given painter"""
        engine = CharIconEngine(self, painter, options)
        return QtGui.QIcon(engine)
| mit |
BT-ojossen/l10n-switzerland | l10n_ch_account_statement_base_import/tests/test_ubs.py | 2 | 3813 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Steve Ferry
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from ..parsers.ubs_file_parser import UBSCSVParser
from .common import BaseParserTest, BaseStatementImportTest
class UBSParserTest(BaseParserTest):

    def setUp(self):
        # NOTE(review): super() is given BaseParserTest here, so
        # BaseParserTest.setUp itself is skipped and its parent's runs
        # instead -- confirm this is intended.
        super(BaseParserTest, self).setUp()
        self.parser = UBSCSVParser(self.get_file_content('UBS_export.csv'))

    def test_file_type_detection(self):
        """The parser recognizes a genuine UBS export and rejects garbage."""
        self.assertTrue(self.parser.file_is_known())
        self.parser.datas = 'BAM'
        self.assertFalse(self.parser.file_is_known())

    def test_parse(self):
        """Parsing the sample export yields one statement with the expected
        balances and transactions."""
        self.parser.parse()

        self.assertEqual('CHF', self.parser.get_currency())

        statements = self.parser.get_statements()
        self.assertIsInstance(statements, list)
        self.assertEqual(len(statements), 1)
        self.assertTrue(all(isinstance(x, dict) for x in statements))

        statement = statements[0]
        self.assertTrue(
            all(isinstance(x, dict) for x in statement['transactions']))
        self.assertEqual(20289.60, statement['balance_start'])
        self.assertEqual(20827.00, statement['balance_end_real'])
        self.assertEqual(6, len(statement['transactions']))

        first_line = statement['transactions'][0]
        # Spot-check the common fields of the first transaction.
        self.assertEqual(str(first_line['date'].date()), "2014-08-06")
        self.assertEqual(first_line['amount'], -88.00)
        self.assertEqual(first_line['name'], "Standing order")
        # here you can add more subtle and detailed test
        # for each _parse functions using forged element tree
class UBSImportTest(BaseStatementImportTest):

    def setUp(self):
        super(UBSImportTest, self).setUp()
        # Bind the wizard to the test journal so the import targets it.
        self.import_wizard_obj = self.import_wizard_obj.with_context(
            journal_id=self.journal.id)

    def test_ubs_import(self):
        """Importing the sample UBS export creates a statement with the
        expected balances and line count."""
        wizard = self.create_wizard_from_file('UBS_export.csv')
        action = wizard.import_file()

        created_ids = action['context'].get('statement_ids')
        statement = self.env['account.bank.statement'].browse(created_ids)[0]

        self.assertEqual(20289.60,
                         float("{0:.2f}".format(statement.balance_start)))
        self.assertEqual(20827.00, statement.balance_end_real)
        self.assertEqual(6, len(statement.line_ids))
        # self.assertTrue(statement.account_id)
        self.assertEqual(0, len(statement.mapped('line_ids.related_files')))

        first_line = statement.line_ids[0]
        # Spot-check the common fields of the first statement line.
        self.assertEqual(first_line.date, "2014-08-06")
        self.assertEqual(first_line.amount, -88.00)
        self.assertEqual(first_line.name, "Standing order")
| agpl-3.0 |
baidu/Paddle | python/paddle/fluid/tests/unittests/test_cumsum_op.py | 5 | 3669 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
class TestSumOp1(OpTest):
    """Forward and gradient checks for cumsum along the last axis of a
    3-D input."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2}
        data = np.random.random((5, 6, 10)).astype("float64")
        self.inputs = {'X': data}
        # Reference result computed with numpy.
        self.outputs = {'Out': data.cumsum(axis=2)}

    def test_check_output(self):
        # Forward result must match the numpy reference.
        self.check_output()

    def test_check_grad(self):
        # Numeric-vs-analytic gradient check w.r.t. X.
        self.check_grad(['X'], 'Out')
class TestSumOp2(OpTest):
    """Reverse cumulative sum along the last axis (axis=-1, reverse=True)."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': -1, 'reverse': True}
        data = np.random.random((5, 6, 10)).astype("float64")
        self.inputs = {'X': data}
        # Reference: reverse the scanned axis, take the running sum, and
        # reverse back (identical values to np.flip/cumsum/np.flip).
        self.outputs = {'Out': data[:, :, ::-1].cumsum(axis=2)[:, :, ::-1]}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
class TestSumOp3(OpTest):
    """Cumulative sum over the middle axis (axis=1) of a 3-D input."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 1}
        data = np.random.random((5, 6, 10)).astype("float64")
        self.inputs = {'X': data}
        self.outputs = {'Out': data.cumsum(axis=1)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
class TestSumOp4(OpTest):
    """Cumulative sum over the first axis (axis=0) of a 3-D input."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 0}
        data = np.random.random((5, 6, 10)).astype("float64")
        self.inputs = {'X': data}
        self.outputs = {'Out': data.cumsum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
class TestSumOp5(OpTest):
    """cumsum on a 2-D input with no explicit 'axis' attribute."""

    def setUp(self):
        self.op_type = "cumsum"
        data = np.random.random((5, 6)).astype("float64")
        self.inputs = {'X': data}
        # The reference scans axis 1 (the last axis of this 2-D input) --
        # presumably the op's default axis; confirm against the op spec.
        self.outputs = {'Out': data.cumsum(axis=1)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
class TestSumOp7(OpTest):
    """cumsum over a 1-D input with no explicit 'axis' attribute."""

    def setUp(self):
        self.op_type = "cumsum"
        # FIX: "(6)" is just the int 6, not a tuple -- spell the 1-D shape
        # explicitly as (6,) so the intent is unambiguous.
        self.inputs = {'X': np.random.random((6,)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)}

    def test_check_output(self):
        # Forward result must match the numpy reference.
        self.check_output()

    def test_check_grad(self):
        # Gradient check with respect to X.
        self.check_grad(['X'], 'Out')
class TestSumOp8(OpTest):
    """Exclusive cumulative sum along axis 2: each output slot holds the sum
    of all *preceding* inputs, so the first slot is zero."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((5, 6, 3)).astype("float64")
        self.inputs = {'X': a}
        # Reference: inclusive cumsum of all but the last slice, shifted one
        # position to the right with a zero-filled first slot.  Produces the
        # same values as concatenating a zeros block in front.
        expected = np.zeros_like(a)
        expected[:, :, 1:] = a[:, :, :-1].cumsum(axis=2)
        self.outputs = {'Out': expected}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
kobejean/tensorflow | tensorflow/python/data/kernel_tests/flat_map_dataset_op_test.py | 9 | 5728 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class FlatMapDatasetTest(test_base.DatasetTestBase):
  """Graph-mode tests for `Dataset.flat_map`."""
  # pylint: disable=g-long-lambda

  def testFlatMapDataset(self):
    """Each input element i expands into a dataset that repeats [i] i times."""
    repeats = [1, 2, 3, 4, 5, 0, 1]
    components = np.array(repeats, dtype=np.int64)
    iterator = (
        dataset_ops.Dataset.from_tensor_slices(components)
        .flat_map(lambda x: dataset_ops.Dataset.from_tensors([x]).repeat(x))
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      for i in repeats:
        for _ in range(i):
          self.assertEqual(i, sess.run(get_next))
      # After consuming every element the iterator must be exhausted.
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testNestedFlatMapDataset(self):
    """Two nested flat_maps flatten a 2-D array of repeat counts."""
    repeats = [[1, 2], [3, 4], [5, 0], [1, 7]]
    components = np.array(repeats, dtype=np.int64)
    iterator = (
        dataset_ops.Dataset.from_tensor_slices(components)
        .flat_map(lambda x: dataset_ops.Dataset.from_tensor_slices(x)
                  .flat_map(lambda y: dataset_ops.Dataset.from_tensors(y)
                            .repeat(y))).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      # Elements arrive row by row, each count i repeated i times.
      for row in repeats:
        for i in row:
          for _ in range(i):
            self.assertEqual(i, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testSharedResourceNestedFlatMapDataset(self):
    """Same pipeline as the nested test, but the iterator resource is shared
    by name between two sessions on one local server."""
    repeats = [[1, 2], [3, 4], [5, 0], [1, 7]]
    components = np.array(repeats, dtype=np.int64)
    iterator = (
        dataset_ops.Dataset.from_tensor_slices(components)
        .flat_map(lambda x: dataset_ops.Dataset.from_tensor_slices(x)
                  .flat_map(lambda y: dataset_ops.Dataset.from_tensors(y)
                            .repeat(y))).make_initializable_iterator(
                                shared_name="shared_flat_map_iterator"))
    init_op = iterator.initializer
    get_next = iterator.get_next()

    # Create two concurrent sessions that share the same iterator
    # resource on the same server, and verify that a random
    # interleaving of `Session.run(get_next)` calls on the two
    # sessions yields the expected result.
    server = server_lib.Server.create_local_server()
    with session.Session(server.target) as sess1:
      with session.Session(server.target) as sess2:
        for _ in range(3):
          sess = random.choice([sess1, sess2])
          sess.run(init_op)
          for row in repeats:
            for i in row:
              for _ in range(i):
                sess = random.choice([sess1, sess2])
                self.assertEqual(i, sess.run(get_next))
          with self.assertRaises(errors.OutOfRangeError):
            sess = random.choice([sess1, sess2])
            sess.run(get_next)

  def testMapDict(self):
    """flat_map over dict elements: emits d["foo"] repeated d["bar"] times."""
    iterator = (dataset_ops.Dataset.range(10)
                .map(lambda x: {"foo": x * 2, "bar": x ** 2})
                .flat_map(lambda d: dataset_ops.Dataset.from_tensors(d["foo"])
                          .repeat(d["bar"]))
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      for i in range(10):
        # For input i: value i*2 repeated i**2 times.
        for _ in range(i ** 2):
          self.assertEqual(i * 2, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  # pylint: enable=g-long-lambda

  def testSparse(self):
    """flat_map whose map stage produces sparse tensors, densified row-wise."""

    def _map_fn(i):
      # 2x2 sparse matrix with values [i, -i] at positions (0, 0) and (1, 1).
      return sparse_tensor.SparseTensorValue(
          indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])

    def _flat_map_fn(x):
      # Densify the sparse value, then emit each row as its own element.
      return dataset_ops.Dataset.from_tensor_slices(
          sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))

    iterator = (
        dataset_ops.Dataset.range(10).map(_map_fn).flat_map(_flat_map_fn)
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      for i in range(10):
        for j in range(2):
          # Row 0 of the dense matrix is [i, 0]; row 1 is [0, -i].
          expected = [i, 0] if j % 2 == 0 else [0, -i]
          self.assertAllEqual(expected, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
if __name__ == "__main__":
test.main()
| apache-2.0 |
Tallisado/pyrobot_v1.0 | dev/resources/ondemand.py | 1 | 1147 | import os
def missing_pyrobot_envs(missing='(unspecified)'):
    """Report missing required environment variables and abort with exit
    status 1.

    :param missing: space-separated names of the missing variables.  A
        default is provided because legacy call sites invoked this with no
        argument, which previously raised TypeError instead of printing the
        intended diagnostic.
    """
    # Parenthesized single-argument print works under both Python 2 and 3.
    print('(pyrobot|ondemand) failed to receive all required env variables '
          'from pyrobot initiation script: ' + missing)
    # Equivalent to exit(1) but does not depend on the site module.
    raise SystemExit(1)
# default is to run locally; these names are set by the pyrobot initiation
# script.
envname_pyrobot_usesauce = "SAUCE"
envname_pyrobot_browser = "PYROBOT_BROWSER"
envname_pyrobot_remote_url = "PYROBOT_REMOTE_URL"
envname_pyrobot_caps = "PYROBOT_CAPS"

env_pyro_browser = ""
env_sauce_mode = ""

if not os.environ.get(envname_pyrobot_browser):
    # BUGFIX: the original called missing_pyrobot_envs() without its required
    # argument, so a missing browser variable raised TypeError instead of
    # printing the diagnostic and exiting.
    missing_pyrobot_envs(envname_pyrobot_browser)
else:
    BROWSER = os.environ[envname_pyrobot_browser]
    BASE_URL = os.environ['BASE_URL']

# BUGFIX: the original tested `not (a, b)` -- a two-element tuple is always
# truthy, so the check could never fire and a missing REMOTE/CAPS variable
# surfaced later as a bare KeyError.  Require both variables explicitly.
if not (os.environ.get(envname_pyrobot_remote_url)
        and os.environ.get(envname_pyrobot_caps)):
    missing_pyrobot_envs("%s %s" % (envname_pyrobot_remote_url,
                                    envname_pyrobot_caps))
# BROWSER += ' remote_url=%s desired_capabilities=%s' % (os.environ[envname_pyrobot_remote_url], os.environ[envname_pyrobot_caps])
REMOTE = os.environ[envname_pyrobot_remote_url]
CAPS = os.environ[envname_pyrobot_caps]
| apache-2.0 |
mrrrgn/filtered_websocket | build/lib/filtered_websocket/filters/base.py | 1 | 1711 | """
FilterBase and FilterMeta allow for the simple creation of a filter chains.
Any class that inherits from a child of FilterBase and FilterMeta
will have its filter method called upon run being executed from its parent class.
Ex:
>>> class A(FilterBase):
>>> class __metaclass__(FilterMeta):
>>> pass
>>> class B(A):
>>> @classmethod
>>> def filter(cls, web_socket_instance, data):
>>> print("foo")
>>> class C(A):
>>> @classmethod
>>> def filter(cls, web_socket_instance, data):
>>> print("bar")
>>> A.run(web_socket_instance)
foo
bar
"""
from __future__ import absolute_import
from six import add_metaclass
class FilterBase(object):
    """Root of a filter chain.

    ``run`` dispatches to every class registered on the class-level
    ``_filters`` list, which is populated by :class:`FilterMeta` as
    subclasses are defined.
    """

    @classmethod
    def run(cls, web_socket_instance, data=None):
        # Invoke each registered filter in registration order.
        for registered in cls._filters:
            registered.filter(web_socket_instance, data)

    @classmethod
    def filter(cls, web_socket_instance, data=None):
        # Hook: concrete filters must override this.
        raise NotImplementedError
class FilterMeta(type):
    """Metaclass that maintains a per-event filter registry.

    When the direct subclass of :class:`FilterBase` is created, a fresh
    ``_filters`` list is stored on this metaclass; every deeper subclass
    appends itself to that list, so ``FilterBase.run`` can dispatch to all
    registered filters of that event type.
    """

    def __init__(cls, name, bases, namespace):
        if cls.__base__ is FilterBase:
            # Chain root: start an empty registry on the metaclass itself.
            type(cls)._filters = []
        else:
            # Concrete filter: register it for execution by run().
            type(cls)._filters.append(cls)
# Each event type gets its own FilterMeta subclass so that filter
# registration (see FilterMeta) is kept in a separate registry per event.
class DataFilterMeta(FilterMeta):
    pass


@add_metaclass(DataFilterMeta)
class WebSocketDataFilter(FilterBase):
    # Chain root for "data" filters; subclasses auto-register via
    # DataFilterMeta.
    pass


class MessageFilterMeta(FilterMeta):
    pass


@add_metaclass(MessageFilterMeta)
class WebSocketMessageFilter(FilterBase):
    # Chain root for "message" filters.
    pass


class DisconnectFilterMeta(FilterMeta):
    pass


@add_metaclass(DisconnectFilterMeta)
class WebSocketDisconnectFilter(FilterBase):
    # Chain root for "disconnect" filters.
    pass


class ConsumerFilterMeta(FilterMeta):
    pass


@add_metaclass(ConsumerFilterMeta)
class WebSocketConsumerFilter(FilterBase):
    # Chain root for "consumer" filters.
    pass
| gpl-2.0 |
ehsangolshani/crazy-hamster | .venv/lib/python3.5/site-packages/pip-9.0.1-py3.5.egg/pip/_vendor/distlib/resources.py | 335 | 10766 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import bisect
import io
import logging
import os
import pkgutil
import shutil
import sys
import types
import zipimport
from . import DistlibException
from .util import cached_property, get_cache_base, path_to_cache_dir, Cache
logger = logging.getLogger(__name__)
cache = None # created when needed
class ResourceCache(Cache):
    """An on-disk cache that materialises package resources as real files,
    for callers that need a filesystem path (see ``Resource.file_path``)."""

    def __init__(self, base=None):
        if base is None:
            # Use native string to avoid issues on 2.x: see Python #20140.
            base = os.path.join(get_cache_base(), str('resource-cache'))
        super(ResourceCache, self).__init__(base)

    def is_stale(self, resource, path):
        """
        Is the cache stale for the given resource?

        :param resource: The :class:`Resource` being cached.
        :param path: The path of the resource in the cache.
        :return: True if the cache is stale.
        """
        # Cache invalidation is a hard problem :-)
        # Always report stale, so the bytes are rewritten on each access
        # rather than risking serving outdated content.
        return True

    def get(self, resource):
        """
        Get a resource into the cache.

        :param resource: A :class:`Resource` instance.
        :return: The pathname of the resource in the cache.
        """
        prefix, path = resource.finder.get_cache_info(resource)
        if prefix is None:
            # The resource already has a usable filesystem path.
            result = path
        else:
            result = os.path.join(self.base, self.prefix_to_dir(prefix), path)
            dirname = os.path.dirname(result)
            if not os.path.isdir(dirname):
                os.makedirs(dirname)
            if not os.path.exists(result):
                stale = True
            else:
                stale = self.is_stale(resource, path)
            if stale:
                # write the bytes of the resource to the cache location
                with open(result, 'wb') as f:
                    f.write(resource.bytes)
        return result
class ResourceBase(object):
    """Common state shared by resources and resource containers: the
    owning finder and the resource's name within the package."""

    def __init__(self, finder, name):
        # The ResourceFinder that located this resource and performs I/O
        # on its behalf.
        self.finder = finder
        # Name of the resource, as passed to the finder.
        self.name = name
class Resource(ResourceBase):
    """
    A class representing an in-package resource, such as a data file. This is
    not normally instantiated by user code, but rather by a
    :class:`ResourceFinder` which manages the resource.
    """
    is_container = False        # Backwards compatibility

    def as_stream(self):
        """
        Get the resource as a stream.

        This is not a property to make it obvious that it returns a new stream
        each time.
        """
        return self.finder.get_stream(self)

    @cached_property
    def file_path(self):
        # Materialise the resource on disk via the module-level shared
        # ResourceCache (created lazily on first use) and return its path.
        global cache
        if cache is None:
            cache = ResourceCache()
        return cache.get(self)

    @cached_property
    def bytes(self):
        # Entire resource content as a bytes object.
        return self.finder.get_bytes(self)

    @cached_property
    def size(self):
        # Size of the resource in bytes, as reported by the finder.
        return self.finder.get_size(self)
class ResourceContainer(ResourceBase):
    """A resource that contains other resources (e.g. a package directory)."""
    is_container = True     # Backwards compatibility

    @cached_property
    def resources(self):
        # Names of the immediate children, as reported by the finder.
        return self.finder.get_resources(self)
class ResourceFinder(object):
    """
    Resource finder for file system resources.
    """

    # Compiled files are never listed as resources; Jython also produces
    # .class files.
    if sys.platform.startswith('java'):
        skipped_extensions = ('.pyc', '.pyo', '.class')
    else:
        skipped_extensions = ('.pyc', '.pyo')

    def __init__(self, module):
        # The finder is anchored at the directory containing the module.
        self.module = module
        self.loader = getattr(module, '__loader__', None)
        self.base = os.path.dirname(getattr(module, '__file__', ''))

    def _adjust_path(self, path):
        # Resolve symlinks for on-disk resources.
        return os.path.realpath(path)

    def _make_path(self, resource_name):
        # Issue #50: need to preserve type of path on Python 2.x
        # like os.path._get_sep
        if isinstance(resource_name, bytes):    # should only happen on 2.x
            sep = b'/'
        else:
            sep = '/'
        parts = resource_name.split(sep)
        parts.insert(0, self.base)
        result = os.path.join(*parts)
        return self._adjust_path(result)

    def _find(self, path):
        return os.path.exists(path)

    def get_cache_info(self, resource):
        # (prefix, path): a None prefix tells ResourceCache.get that the
        # path is directly usable without copying.
        return None, resource.path

    def find(self, resource_name):
        """Return a :class:`Resource` or :class:`ResourceContainer` for
        *resource_name*, or None if it does not exist."""
        path = self._make_path(resource_name)
        if not self._find(path):
            result = None
        else:
            if self._is_directory(path):
                result = ResourceContainer(self, resource_name)
            else:
                result = Resource(self, resource_name)
            result.path = path
        return result

    def get_stream(self, resource):
        # Caller is responsible for closing the returned file object.
        return open(resource.path, 'rb')

    def get_bytes(self, resource):
        with open(resource.path, 'rb') as f:
            return f.read()

    def get_size(self, resource):
        return os.path.getsize(resource.path)

    def get_resources(self, resource):
        # Immediate children of a container, minus bytecode artifacts.
        def allowed(f):
            return (f != '__pycache__' and not
                    f.endswith(self.skipped_extensions))
        return set([f for f in os.listdir(resource.path) if allowed(f)])

    def is_container(self, resource):
        return self._is_directory(resource.path)

    _is_directory = staticmethod(os.path.isdir)

    def iterator(self, resource_name):
        """Yield *resource_name* and every resource below it.  Containers are
        processed from a FIFO queue; leaf resources are yielded as they are
        encountered."""
        resource = self.find(resource_name)
        if resource is not None:
            todo = [resource]
            while todo:
                resource = todo.pop(0)
                yield resource
                if resource.is_container:
                    rname = resource.name
                    for name in resource.resources:
                        if not rname:
                            new_name = name
                        else:
                            new_name = '/'.join([rname, name])
                        child = self.find(new_name)
                        if child.is_container:
                            todo.append(child)
                        else:
                            yield child
class ZipResourceFinder(ResourceFinder):
    """
    Resource finder for resources in .zip files.
    """

    def __init__(self, module):
        super(ZipResourceFinder, self).__init__(module)
        archive = self.loader.archive
        # Length of the archive path plus one separator; used to strip the
        # archive prefix from absolute resource paths.
        self.prefix_len = 1 + len(archive)
        # PyPy doesn't have a _files attr on zipimporter, and you can't set one
        if hasattr(self.loader, '_files'):
            self._files = self.loader._files
        else:
            self._files = zipimport._zip_directory_cache[archive]
        # Sorted entry names enable the bisect-based prefix searches below.
        self.index = sorted(self._files)

    def _adjust_path(self, path):
        # No symlink resolution inside an archive.
        return path

    def _find(self, path):
        # An entry exists if it is an exact archive member, or (for
        # directories) if some member starts with "<path><sep>".
        path = path[self.prefix_len:]
        if path in self._files:
            result = True
        else:
            if path and path[-1] != os.sep:
                path = path + os.sep
            i = bisect.bisect(self.index, path)
            try:
                result = self.index[i].startswith(path)
            except IndexError:
                result = False
        if not result:
            logger.debug('_find failed: %r %r', path, self.loader.prefix)
        else:
            logger.debug('_find worked: %r %r', path, self.loader.prefix)
        return result

    def get_cache_info(self, resource):
        # Cache under the archive path so ResourceCache extracts the bytes.
        prefix = self.loader.archive
        path = resource.path[1 + len(prefix):]
        return prefix, path

    def get_bytes(self, resource):
        return self.loader.get_data(resource.path)

    def get_stream(self, resource):
        return io.BytesIO(self.get_bytes(resource))

    def get_size(self, resource):
        path = resource.path[self.prefix_len:]
        # Element 3 of the zipimport directory-cache tuple is used as the
        # file size (see zipimport's internal _files format).
        return self._files[path][3]

    def get_resources(self, resource):
        # Walk the sorted index from the first entry under this directory
        # and collect only the immediate children.
        path = resource.path[self.prefix_len:]
        if path and path[-1] != os.sep:
            path += os.sep
        plen = len(path)
        result = set()
        i = bisect.bisect(self.index, path)
        while i < len(self.index):
            if not self.index[i].startswith(path):
                break
            s = self.index[i][plen:]
            result.add(s.split(os.sep, 1)[0])   # only immediate children
            i += 1
        return result

    def _is_directory(self, path):
        # A "directory" exists in a zip iff some entry lies under it.
        path = path[self.prefix_len:]
        if path and path[-1] != os.sep:
            path += os.sep
        i = bisect.bisect(self.index, path)
        try:
            result = self.index[i].startswith(path)
        except IndexError:
            result = False
        return result
# Maps loader types to finder factories.
_finder_registry = {
    type(None): ResourceFinder,
    zipimport.zipimporter: ZipResourceFinder
}

try:
    # In Python 3.6, _frozen_importlib -> _frozen_importlib_external
    try:
        import _frozen_importlib_external as _fi
    except ImportError:
        import _frozen_importlib as _fi
    _finder_registry[_fi.SourceFileLoader] = ResourceFinder
    _finder_registry[_fi.FileFinder] = ResourceFinder
    del _fi
except (ImportError, AttributeError):
    # Frozen importlib unavailable or lacking these attributes; the
    # default registry entries above still apply.
    pass


def register_finder(loader, finder_maker):
    """Register *finder_maker* as the finder factory for *loader*'s type."""
    _finder_registry[type(loader)] = finder_maker
# Cache of package name -> finder instance.
_finder_cache = {}


def finder(package):
    """
    Return a resource finder for a package.

    :param package: The name of the package.
    :return: A :class:`ResourceFinder` instance for the package.
    """
    if package in _finder_cache:
        result = _finder_cache[package]
    else:
        # Import on demand so a finder can be requested by name alone.
        if package not in sys.modules:
            __import__(package)
        module = sys.modules[package]
        path = getattr(module, '__path__', None)
        if path is None:
            # Only packages (which have __path__) can contain resources.
            raise DistlibException('You cannot get a finder for a module, '
                                   'only for a package')
        loader = getattr(module, '__loader__', None)
        finder_maker = _finder_registry.get(type(loader))
        if finder_maker is None:
            raise DistlibException('Unable to locate finder for %r' % package)
        result = finder_maker(module)
        _finder_cache[package] = result
    return result
# Stand-in module handed to finder constructors, which expect an object
# carrying __file__ and __loader__ attributes.
_dummy_module = types.ModuleType(str('__dummy__'))


def finder_for_path(path):
    """
    Return a resource finder for a path, which should represent a container.

    :param path: The path.
    :return: A :class:`ResourceFinder` instance for the path, or None if no
        finder is registered for the path's importer type.
    """
    result = None
    # calls any path hooks, gets importer into cache
    pkgutil.get_importer(path)
    loader = sys.path_importer_cache.get(path)
    finder = _finder_registry.get(type(loader))
    if finder:
        module = _dummy_module
        module.__file__ = os.path.join(path, '')
        module.__loader__ = loader
        result = finder(module)
    return result
| gpl-3.0 |
asphalt-framework/asphalt-serialization | tests/test_serializers.py | 1 | 9158 | import re
from datetime import datetime, timezone
from functools import partial
from types import SimpleNamespace
import pytest
from cbor2 import CBORTag
from msgpack import ExtType
from asphalt.serialization.serializers.cbor import CBORSerializer, CBORTypeCodec
from asphalt.serialization.serializers.json import JSONSerializer
from asphalt.serialization.serializers.msgpack import MsgpackSerializer, MsgpackTypeCodec
from asphalt.serialization.serializers.pickle import PickleSerializer
from asphalt.serialization.serializers.yaml import YAMLSerializer
class SimpleType:
    """Plain two-attribute value object used as a marshalling test subject."""

    def __init__(self, value_a, value_b):
        self.value_a = value_a
        self.value_b = value_b

    def __eq__(self, other):
        # Only compare against other SimpleType instances; otherwise defer
        # to the reflected operation.
        if not isinstance(other, SimpleType):
            return NotImplemented
        return (self.value_a, self.value_b) == (other.value_a, other.value_b)
class SlottedSimpleType:
    """Slotted variant of SimpleType: no ``__dict__``, so its state must
    travel through ``__getstate__``/``__setstate__``."""

    __slots__ = ('value_a', 'value_b')

    def __init__(self, value_a, value_b):
        self.value_a = value_a
        self.value_b = value_b

    def __getstate__(self):
        return dict(value_a=self.value_a, value_b=self.value_b)

    def __setstate__(self, state):
        for attr in ('value_a', 'value_b'):
            setattr(self, attr, state[attr])

    def __eq__(self, other):
        if not isinstance(other, SlottedSimpleType):
            return NotImplemented
        return (self.value_a, self.value_b) == (other.value_a, other.value_b)
class CustomStateSimpleType(SimpleType):
    """SimpleType whose state round-trips through explicit
    ``marshal()``/``unmarshal()`` methods instead of ``__dict__``."""

    def marshal(self):
        # Tuple form keeps the attribute order explicit.
        return self.value_a, self.value_b

    def unmarshal(self, state):
        self.value_a, self.value_b = state
class UnserializableSimpleType:
    """Slotted type with neither ``__dict__`` nor custom state hooks, used to
    provoke serializer failure paths."""

    __slots__ = ('value_a', 'value_b')

    def __init__(self, value_a, value_b):
        self.value_a = value_a
        self.value_b = value_b
def marshal_datetime(dt: datetime):
    """Marshal *dt* to a POSIX timestamp (float seconds since the epoch)."""
    posix_seconds = dt.timestamp()
    return posix_seconds
def unmarshal_datetime(state):
    """Reconstruct a UTC-aware datetime from a POSIX timestamp."""
    return datetime.fromtimestamp(state, tz=timezone.utc)
@pytest.fixture(params=['cbor', 'json', 'msgpack', 'pickle', 'yaml'])
def serializer_type(request):
    # Parametrizes dependent tests across every supported serializer backend.
    return request.param


@pytest.fixture
def serializer(request, serializer_type):
    # Optional constructor kwargs can be supplied per-test via indirect
    # parametrization (request.param).
    kwargs = getattr(request, 'param', {})
    return {
        'cbor': partial(CBORSerializer, encoder_options=dict(value_sharing=True)),
        'json': JSONSerializer,
        'msgpack': MsgpackSerializer,
        'pickle': PickleSerializer,
        'yaml': YAMLSerializer
    }[serializer_type](**kwargs)
@pytest.mark.parametrize('input', [
    'åäö',
    -8,
    5.06,
    [1, 'test', 1.23],
    {'x': 'foo', 'bar': 'baz'}
], ids=['str', 'int', 'float', 'list', 'dict'])
def test_basic_types_roundtrip(serializer, input):
    # Scalars and containers must serialize to bytes and round-trip
    # unchanged with every serializer backend.
    output = serializer.serialize(input)
    assert isinstance(output, bytes)
    deserialized = serializer.deserialize(output)
    assert deserialized == input
@pytest.mark.parametrize('serializer_type', ['cbor', 'pickle', 'yaml'])
def test_circular_reference(serializer):
    # Restricted to the backends exercised here: the cycle a -> b -> a must
    # survive a round trip as a shared object (identity preserved), not as
    # an infinite recursion or a copy.
    a = {'foo': 1}
    b = {'a': a}
    a['b'] = b
    output = serializer.serialize(a)
    assert isinstance(output, bytes)
    other_a = serializer.deserialize(output)
    assert other_a['foo'] == 1
    other_b = other_a['b']
    assert other_b['a'] is other_a
@pytest.mark.parametrize('serializer_type', ['cbor', 'msgpack', 'json'])
class TestCustomTypes:
    """Custom-type registration and marshalling behavior, run against the
    cbor, msgpack and json serializers."""

    @pytest.mark.parametrize('cls', [SimpleType, SlottedSimpleType], ids=['normal', 'slotted'])
    def test_marshal_unmarshal(self, serializer, cls):
        # Nested custom objects must round-trip for both __dict__-backed and
        # slotted classes.
        serializer.register_custom_type(cls)
        testval = cls(1, {'a': 1})
        testval2 = cls(2, testval)
        output = serializer.serialize(testval2)
        outval = serializer.deserialize(output)
        assert outval == testval2

    def test_custom_state(self, serializer):
        """Test that marshallers and umarshallers can be embedded into the relevant class."""
        serializer.register_custom_type(CustomStateSimpleType, CustomStateSimpleType.marshal,
                                        CustomStateSimpleType.unmarshal)
        testval = CustomStateSimpleType('a', 1)
        output = serializer.serialize(testval)
        outval = serializer.deserialize(output)
        assert outval == testval

    def test_marshal_builtin(self, serializer):
        """Test that a single-argument unmarshaller can be used to unmarshal built-in types."""
        serializer.register_custom_type(datetime, marshal_datetime, unmarshal_datetime)
        dt = datetime(2016, 9, 9, 7, 21, 16, tzinfo=timezone.utc)
        output = serializer.serialize(dt)
        dt2 = serializer.deserialize(output)
        assert dt == dt2

    def test_missing_getattr(self, serializer):
        # A slotted object without __getstate__ cannot yield default state.
        testval = UnserializableSimpleType(1, 'a')
        serializer.register_custom_type(UnserializableSimpleType)
        exc = pytest.raises(TypeError, serializer.serialize, testval)
        exc.match("'test_serializers.UnserializableSimpleType' has no __dict__ attribute and does "
                  "not implement __getstate__()")

    def test_missing_setattr(self, serializer):
        # ...and without __setstate__ it cannot accept state on deserialize.
        testval = UnserializableSimpleType(1, 'a')
        serializer.register_custom_type(UnserializableSimpleType, lambda instance: {})
        serialized = serializer.serialize(testval)
        exc = pytest.raises(Exception, serializer.deserialize, serialized)
        exc.match(
            "'test_serializers.UnserializableSimpleType' has no __dict__ attribute and does not "
            "implement __setstate__()")

    def test_missing_marshaller(self, serializer_type, serializer):
        # Serializing an unregistered type must fail with a clear message.
        serializer.register_custom_type(SlottedSimpleType)
        testval = SimpleType(1, 'a')
        exc = pytest.raises(Exception, serializer.serialize, testval)
        exc.match('no marshaller found for type "test_serializers.SimpleType"')

    def test_missing_unmarshaller(self, serializer):
        # A type registered with unmarshaller=None can be serialized but not
        # deserialized.
        serializer.register_custom_type(SlottedSimpleType)
        serializer.register_custom_type(SimpleType, unmarshaller=None)
        testval = SimpleType(1, 'a')
        serialized = serializer.serialize(testval)
        exc = pytest.raises(Exception, serializer.deserialize, serialized)
        exc.match('no unmarshaller found for type "test_serializers.SimpleType"')

    def test_nowrap(self, serializer):
        # With wrap_state=False the marshalled state is stored bare, so the
        # type information is lost and a plain dict comes back.
        serializer.register_custom_type(SimpleType, wrap_state=False)
        testval = SimpleType(1, 'a')
        serialized = serializer.serialize(testval)
        deserialized = serializer.deserialize(serialized)
        assert deserialized == {'value_a': 1, 'value_b': 'a'}
def test_mime_types(serializer):
    # Every serializer must advertise a "type/subtype"-shaped MIME type.
    assert re.match('[a-z]+/[a-z]+', serializer.mimetype)
@pytest.mark.parametrize('safe', [True, False], ids=['safe', 'unsafe'])
def test_yaml_safe_attribute(safe):
    # The constructor argument must be reflected by the public attribute.
    serializer = YAMLSerializer(safe=safe)
    assert serializer.safe is safe
@pytest.mark.parametrize('serializer_type', ['msgpack'])
def test_msgpack_exttype_passthrough(serializer):
    # A raw msgpack ExtType must round-trip untouched even when custom
    # types are registered on the serializer.
    serializer.register_custom_type(SlottedSimpleType)
    ext = ExtType(6, b'somedata')
    data = serializer.serialize(ext)
    obj = serializer.deserialize(data)
    assert isinstance(obj, ExtType)
    assert obj.code == 6
    assert obj.data == b'somedata'
@pytest.mark.parametrize('serializer_type', ['cbor'])
def test_cbor_self_referential_objects(serializer):
    # Two namespaces referencing each other must be reconstructed with the
    # mutual references intact (the fixture enables CBOR value sharing).
    value1 = SimpleNamespace()
    value1.val = 1
    value1.next = value2 = SimpleNamespace()
    value2.val = 2
    value2.previous = value1
    serializer.register_custom_type(SimpleNamespace, typename='Simple')
    data = serializer.serialize(value1)
    obj = serializer.deserialize(data)
    assert obj.val == 1
    assert obj.next.val == 2
    # Identity, not just equality: the cycle must be shared.
    assert obj.next.previous is obj
@pytest.mark.parametrize('serializer_type', ['cbor'])
def test_cbor_oneshot_unmarshal(serializer):
    # A one-shot unmarshaller constructs the object directly from its state
    # dict instead of populating an empty instance.
    def unmarshal_simple(state):
        return SimpleType(**state)

    obj = SimpleType(1, 2)
    serializer.register_custom_type(SimpleType, unmarshaller=unmarshal_simple)
    data = serializer.serialize(obj)
    obj = serializer.deserialize(data)
    assert obj.value_a == 1
    assert obj.value_b == 2
@pytest.mark.parametrize('serializer_type', ['cbor'])
def test_cbor_raw_tag(serializer):
    # A CBORTag unrelated to the custom-type machinery must survive a round
    # trip with its tag number and value intact.
    tag = CBORTag(6000, 'Hello')
    serializer.register_custom_type(SimpleType)
    data = serializer.serialize(tag)
    tag = serializer.deserialize(data)
    assert tag.tag == 6000
    assert tag.value == 'Hello'
class TestObjectHook:
    """Custom-type handling when the codec is configured without a wrapping
    tag/ext code (plain object-hook mode)."""

    @pytest.fixture(params=['msgpack', 'cbor'])
    def serializer(self, request):
        # Build each serializer with its type codec in tag-less mode.
        if request.param == 'msgpack':
            codec = MsgpackTypeCodec(type_code=None)
            return MsgpackSerializer(custom_type_codec=codec)
        else:
            codec = CBORTypeCodec(type_tag=None)
            return CBORSerializer(custom_type_codec=codec)

    def test_object_hook(self, serializer):
        # Nested custom objects must still round-trip via the object hook.
        value1 = SimpleNamespace()
        value1.val = 1
        value1.next = value2 = SimpleNamespace()
        value2.val = 2
        serializer.register_custom_type(SimpleNamespace, typename='Simple')
        data = serializer.serialize(value1)
        obj = serializer.deserialize(data)
        assert obj.val == 1
        assert obj.next.val == 2
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.