repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
redhat-openstack/django | django/contrib/formtools/wizard/storage/cookie.py | 138 | 1050 | import json
from django.core.signing import BadSignature
from django.contrib.formtools.exceptions import WizardViewCookieModified
from django.contrib.formtools.wizard import storage
class CookieStorage(storage.BaseStorage):
    """Wizard storage backend that keeps all step data in a signed cookie.

    The cookie payload is compact JSON; tampering is detected via Django's
    cookie signing and surfaces as ``WizardViewCookieModified``.
    """

    # Compact separators keep the cookie as small as possible.
    encoder = json.JSONEncoder(separators=(',', ':'))

    def __init__(self, *args, **kwargs):
        super(CookieStorage, self).__init__(*args, **kwargs)
        self.data = self.load_data()
        if self.data is None:
            self.init_data()

    def load_data(self):
        """Return the decoded wizard state, or None when no cookie is set.

        Raises WizardViewCookieModified when the signature check fails.
        """
        try:
            raw = self.request.get_signed_cookie(self.prefix)
        except KeyError:
            # No cookie yet for this wizard prefix.
            return None
        except BadSignature:
            raise WizardViewCookieModified('WizardView cookie manipulated')
        return json.loads(raw)

    def update_response(self, response):
        """Write the current state into the response cookie, or clear it."""
        if not self.data:
            response.delete_cookie(self.prefix)
        else:
            response.set_signed_cookie(self.prefix,
                                       self.encoder.encode(self.data))
| bsd-3-clause |
dyyi/moneybook | venv/Lib/site-packages/pip/_vendor/requests/packages/urllib3/util/url.py | 713 | 5879 | from __future__ import absolute_import
from collections import namedtuple
from ..exceptions import LocationParseError
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']


class Url(namedtuple('Url', url_attrs)):
    """
    Datastructure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`.
    """
    # Bug fix: this was previously spelled ``slots = ()``, which is a plain
    # class attribute and does nothing -- every Url instance silently got a
    # per-instance ``__dict__``. Declaring ``__slots__`` keeps instances as
    # lightweight as the namedtuple base intends.
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        # Normalize relative paths to absolute ones ('mail' -> '/mail').
        if path and not path.startswith('/'):
            path = '/' + path
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        uri = self.path or '/'

        if self.query is not None:
            uri += '?' + self.query

        return uri

    @property
    def netloc(self):
        """Network location including host and port."""
        if self.port:
            return '%s:%d' % (self.host, self.port)
        return self.host

    @property
    def url(self):
        """
        Convert self into a url

        This function should more or less round-trip with :func:`.parse_url`. The
        returned url may not be exactly the same as the url inputted to
        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
        with a blank port will have : removed).

        Example: ::

            >>> U = parse_url('http://google.com/mail/')
            >>> U.url
            'http://google.com/mail/'
            >>> Url('http', 'username:password', 'host.com', 80,
            ... '/path', 'query', 'fragment').url
            'http://username:password@host.com:80/path?query#fragment'
        """
        scheme, auth, host, port, path, query, fragment = self
        url = ''

        # We use "is not None" so things still happen with empty strings
        # (or a 0 port).
        if scheme is not None:
            url += scheme + '://'
        if auth is not None:
            url += auth + '@'
        if host is not None:
            url += host
        if port is not None:
            url += ':' + str(port)
        if path is not None:
            url += path
        if query is not None:
            url += '?' + query
        if fragment is not None:
            url += '#' + fragment

        return url

    def __str__(self):
        return self.url
def split_first(s, delims):
    """
    Given a string and an iterable of delimiters, split on the first found
    delimiter. Return two split parts and the matched delimiter.

    If not found, then the first part is the full input string.

    Example::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of delims.
    """
    # Track the leftmost match as an (index, delimiter) pair.
    best = None
    for candidate in delims:
        pos = s.find(candidate)
        if pos < 0:
            continue
        if best is None or pos < best[0]:
            best = (pos, candidate)

    if best is None:
        return s, '', None

    cut, matched = best
    return s[:cut], s[cut + 1:], matched
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.

    Partly backwards-compatible with :mod:`urlparse`.

    Example::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """
    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementations does silly things to be optimal
    # on CPython.

    if not url:
        # Empty
        return Url()

    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None

    # Scheme
    if '://' in url:
        scheme, url = url.split('://', 1)

    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])

    if delim:
        # Reassemble the path, keeping the delimiter that terminated the
        # authority ('/', '?' or '#') as its first character.
        path = delim + path_

    # Auth
    if '@' in url:
        # Last '@' denotes end of auth part
        auth, url = url.rsplit('@', 1)

    # IPv6: a bracketed literal like [::1]; the closing ']' is kept on host.
    if url and url[0] == '[':
        host, url = url.split(']', 1)
        host += ']'

    # Port
    if ':' in url:
        _host, port = url.split(':', 1)

        if not host:
            # Not already set by the IPv6 branch above.
            host = _host

        if port:
            # If given, ports must be integers.
            if not port.isdigit():
                raise LocationParseError(url)
            port = int(port)
        else:
            # Blank ports are cool, too. (rfc3986#section-3.2.3)
            port = None

    elif not host and url:
        host = url

    if not path:
        return Url(scheme, auth, host, port, path, query, fragment)

    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)

    # Query
    if '?' in path:
        path, query = path.split('?', 1)

    return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
    """
    Deprecated. Use :func:`.parse_url` instead.

    Returns a ``(scheme, host, port)`` triple, defaulting scheme to 'http'.
    """
    parsed = parse_url(url)
    scheme = parsed.scheme or 'http'
    return scheme, parsed.hostname, parsed.port
| apache-2.0 |
sodafree/backend | build/ipython/IPython/frontend/html/notebook/notebookmanager.py | 3 | 11283 | """A notebook manager that uses the local file system for storage.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import datetime
import io
import os
import uuid
import glob
from tornado import web
from IPython.config.configurable import LoggingConfigurable
from IPython.nbformat import current
from IPython.utils.traitlets import Unicode, List, Dict, Bool, TraitError
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class NotebookManager(LoggingConfigurable):
    """A notebook manager that stores .ipynb files on the local filesystem.

    Maintains an in-memory, bidirectional mapping between notebook names
    (file basenames without extension) and generated notebook_ids used in
    URLs. Note the mapping is per-process and rebuilt lazily by
    ``list_notebooks``.
    """

    # Directory scanned for notebook files (Python 2: os.getcwdu() returns
    # the unicode cwd).
    notebook_dir = Unicode(os.getcwdu(), config=True, help="""
The directory to use for notebooks.
""")

    def _notebook_dir_changed(self, name, old, new):
        """do a bit of validation of the notebook dir"""
        if os.path.exists(new) and not os.path.isdir(new):
            raise TraitError("notebook dir %r is not a directory" % new)
        if not os.path.exists(new):
            self.log.info("Creating notebook dir %s", new)
            try:
                os.mkdir(new)
            except:
                # NOTE(review): bare except hides the underlying OS error;
                # the TraitError is raised regardless of the cause.
                raise TraitError("Couldn't create notebook dir %r" % new)

    save_script = Bool(False, config=True,
        help="""Automatically create a Python script when saving the notebook.

For easier use of import, %run and %load across notebooks, a
<notebook-name>.py script will be created next to any
<notebook-name>.ipynb on each save. This can also be set with the
short `--script` flag.
"""
    )

    # Extension appended to notebook names to form filenames on disk.
    filename_ext = Unicode(u'.ipynb')
    # Serialization formats accepted by get_notebook / save_* APIs.
    allowed_formats = List([u'json',u'py'])

    # Map notebook_ids to notebook names
    mapping = Dict()
    # Map notebook names to notebook_ids
    rev_mapping = Dict()

    def list_notebooks(self):
        """List all notebooks in the notebook dir.

        This returns a list of dicts of the form::

            dict(notebook_id=notebook,name=name)
        """
        names = glob.glob(os.path.join(self.notebook_dir,
                                       '*' + self.filename_ext))
        # Strip directory and extension to get bare notebook names.
        names = [os.path.splitext(os.path.basename(name))[0]
                 for name in names]

        data = []
        for name in names:
            # Assign an id on first sight; reuse the known id otherwise.
            if name not in self.rev_mapping:
                notebook_id = self.new_notebook_id(name)
            else:
                notebook_id = self.rev_mapping[name]
            data.append(dict(notebook_id=notebook_id,name=name))
        data = sorted(data, key=lambda item: item['name'])
        return data

    def new_notebook_id(self, name):
        """Generate a new notebook_id for a name and store its mappings."""
        # TODO: the following will give stable urls for notebooks, but unless
        # the notebooks are immediately redirected to their new urls when their
        # filemname changes, nasty inconsistencies result.  So for now it's
        # disabled and instead we use a random uuid4() call.  But we leave the
        # logic here so that we can later reactivate it, whhen the necessary
        # url redirection code is written.
        #notebook_id = unicode(uuid.uuid5(uuid.NAMESPACE_URL,
        #                 'file://'+self.get_path_by_name(name).encode('utf-8')))
        notebook_id = unicode(uuid.uuid4())

        self.mapping[notebook_id] = name
        self.rev_mapping[name] = notebook_id
        return notebook_id

    def delete_notebook_id(self, notebook_id):
        """Delete a notebook's id only. This doesn't delete the actual notebook."""
        name = self.mapping[notebook_id]
        del self.mapping[notebook_id]
        del self.rev_mapping[name]

    def notebook_exists(self, notebook_id):
        """Does a notebook exist?"""
        # Both the id mapping and the backing file must exist.
        if notebook_id not in self.mapping:
            return False
        path = self.get_path_by_name(self.mapping[notebook_id])
        return os.path.isfile(path)

    def find_path(self, notebook_id):
        """Return a full path to a notebook given its notebook_id.

        Raises a 404 HTTPError when the id is unknown.
        """
        try:
            name = self.mapping[notebook_id]
        except KeyError:
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        return self.get_path_by_name(name)

    def get_path_by_name(self, name):
        """Return a full path to a notebook given its name."""
        filename = name + self.filename_ext
        path = os.path.join(self.notebook_dir, filename)
        return path

    def get_notebook(self, notebook_id, format=u'json'):
        """Get the representation of a notebook in format by notebook_id.

        Returns a ``(last_modified, name, data)`` triple where ``data`` is
        the serialized notebook.
        """
        format = unicode(format)
        if format not in self.allowed_formats:
            raise web.HTTPError(415, u'Invalid notebook format: %s' % format)
        last_modified, nb = self.get_notebook_object(notebook_id)
        kwargs = {}
        if format == 'json':
            # don't split lines for sending over the wire, because it
            # should match the Python in-memory format.
            kwargs['split_lines'] = False
        data = current.writes(nb, format, **kwargs)
        name = nb.metadata.get('name','notebook')
        return last_modified, name, data

    def get_notebook_object(self, notebook_id):
        """Get the NotebookNode representation of a notebook by notebook_id.

        Returns ``(last_modified, nb)``; 404 when the file is missing,
        500 when it cannot be parsed.
        """
        path = self.find_path(notebook_id)
        if not os.path.isfile(path):
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        info = os.stat(path)
        last_modified = datetime.datetime.utcfromtimestamp(info.st_mtime)
        with open(path,'r') as f:
            s = f.read()
            try:
                # v1 and v2 and json in the .ipynb files.
                nb = current.reads(s, u'json')
            except:
                raise web.HTTPError(500, u'Unreadable JSON notebook.')
        # Always use the filename as the notebook name.
        nb.metadata.name = os.path.splitext(os.path.basename(path))[0]
        return last_modified, nb

    def save_new_notebook(self, data, name=None, format=u'json'):
        """Save a new notebook and return its notebook_id.

        If a name is passed in, it overrides any values in the notebook data
        and the value in the data is updated to use that value.
        """
        if format not in self.allowed_formats:
            raise web.HTTPError(415, u'Invalid notebook format: %s' % format)

        try:
            nb = current.reads(data.decode('utf-8'), format)
        except:
            raise web.HTTPError(400, u'Invalid JSON data')

        if name is None:
            try:
                name = nb.metadata.name
            except AttributeError:
                raise web.HTTPError(400, u'Missing notebook name')
        nb.metadata.name = name

        notebook_id = self.new_notebook_id(name)
        self.save_notebook_object(notebook_id, nb)
        return notebook_id

    def save_notebook(self, notebook_id, data, name=None, format=u'json'):
        """Save an existing notebook by notebook_id."""
        if format not in self.allowed_formats:
            raise web.HTTPError(415, u'Invalid notebook format: %s' % format)

        try:
            nb = current.reads(data.decode('utf-8'), format)
        except:
            raise web.HTTPError(400, u'Invalid JSON data')

        if name is not None:
            nb.metadata.name = name
        self.save_notebook_object(notebook_id, nb)

    def save_notebook_object(self, notebook_id, nb):
        """Save an existing notebook object by notebook_id.

        Handles renames: when the notebook's metadata name differs from the
        stored name, the old files are removed and the mappings updated.
        """
        if notebook_id not in self.mapping:
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        old_name = self.mapping[notebook_id]
        try:
            new_name = nb.metadata.name
        except AttributeError:
            raise web.HTTPError(400, u'Missing notebook name')
        path = self.get_path_by_name(new_name)
        try:
            with open(path,'w') as f:
                current.write(nb, f, u'json')
        except Exception as e:
            raise web.HTTPError(400, u'Unexpected error while saving notebook: %s' % e)
        # save .py script as well
        if self.save_script:
            pypath = os.path.splitext(path)[0] + '.py'
            try:
                with io.open(pypath,'w', encoding='utf-8') as f:
                    current.write(nb, f, u'py')
            except Exception as e:
                raise web.HTTPError(400, u'Unexpected error while saving notebook as script: %s' % e)
        if old_name != new_name:
            # Rename: remove the files under the old name and repoint the
            # id<->name mappings at the new name.
            old_path = self.get_path_by_name(old_name)
            if os.path.isfile(old_path):
                os.unlink(old_path)
            if self.save_script:
                old_pypath = os.path.splitext(old_path)[0] + '.py'
                if os.path.isfile(old_pypath):
                    os.unlink(old_pypath)
            self.mapping[notebook_id] = new_name
            self.rev_mapping[new_name] = notebook_id
            del self.rev_mapping[old_name]

    def delete_notebook(self, notebook_id):
        """Delete notebook by notebook_id."""
        path = self.find_path(notebook_id)
        if not os.path.isfile(path):
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        os.unlink(path)
        self.delete_notebook_id(notebook_id)

    def increment_filename(self, basename):
        """Return a non-used filename of the form basename<int>.

        This searches through the filenames (basename0, basename1, ...)
        until is find one that is not already being used. It is used to
        create Untitled and Copy names that are unique.
        """
        i = 0
        while True:
            name = u'%s%i' % (basename,i)
            path = self.get_path_by_name(name)
            if not os.path.isfile(path):
                break
            else:
                i = i+1
        return path, name

    def new_notebook(self):
        """Create a new notebook and return its notebook_id."""
        path, name = self.increment_filename('Untitled')
        notebook_id = self.new_notebook_id(name)
        metadata = current.new_metadata(name=name)
        nb = current.new_notebook(metadata=metadata)
        with open(path,'w') as f:
            current.write(nb, f, u'json')
        return notebook_id

    def copy_notebook(self, notebook_id):
        """Copy an existing notebook and return its notebook_id."""
        last_mod, nb = self.get_notebook_object(notebook_id)
        name = nb.metadata.name + '-Copy'
        path, name = self.increment_filename(name)
        nb.metadata.name = name
        notebook_id = self.new_notebook_id(name)
        self.save_notebook_object(notebook_id, nb)
        return notebook_id
| bsd-3-clause |
ace-han/wulinfan | apps/gateway/views.py | 55 | 1780 | import logging
from django.views import generic
from django.contrib.auth.models import User
from django.contrib import messages
from django.core.mail import send_mail
from django import http
from django.core.urlresolvers import reverse
from django.template.loader import get_template
from django.template import Context
from apps.gateway import forms
from oscar.apps.customer.forms import generate_username
logger = logging.getLogger('gateway')


class GatewayView(generic.FormView):
    """Create throw-away dashboard credentials and email them on request.

    On a valid form submission, a staff user with generated username and
    password is created, and the credentials are mailed to the address the
    visitor supplied.
    """
    template_name = 'gateway/form.html'
    form_class = forms.GatewayForm

    def form_valid(self, form):
        recipient = form.cleaned_data['email']
        username = generate_username()
        password = generate_username()
        # The stored account email is a placeholder, not the visitor's address.
        placeholder_email = 'dashboard-user-%s@oscarcommerce.com' % username
        user = self.create_dashboard_user(username, placeholder_email, password)
        self.send_confirmation_email(recipient, user, password)
        logger.info("Created dashboard user #%d for %s",
                    user.id, recipient)
        messages.success(
            self.request,
            "The credentials for a dashboard user have been sent to %s" % recipient)
        return http.HttpResponseRedirect(reverse('gateway'))

    def create_dashboard_user(self, username, email, password):
        """Create and return a staff user with the given credentials."""
        new_user = User.objects.create_user(username, email, password)
        new_user.is_staff = True
        new_user.save()
        return new_user

    def send_confirmation_email(self, real_email, user, password):
        """Mail the generated login details to the visitor's real address."""
        body = get_template('gateway/email.txt').render(Context({
            'email': user.email,
            'password': password,
        }))
        send_mail('Dashboard access to Oscar sandbox',
                  body, 'blackhole@latest.oscarcommerce.com',
                  [real_email])
| bsd-3-clause |
DESHRAJ/fjord | vendor/packages/translate-toolkit/translate/lang/el.py | 4 | 2071 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007-2009,2011 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module represents the Greek language.
.. seealso:: http://en.wikipedia.org/wiki/Greek_language
"""
import re
from translate.lang import common
class el(common.Common):
    """This class represents Greek."""

    # Greek uses ; as question mark and the middot instead
    sentenceend = u".!;…"

    # Matches one sentence plus its trailing whitespace; the lookahead
    # requires the next character not to be a lowercase latin/greek letter
    # or digit (i.e. the next sentence starts with caps).
    sentencere = re.compile(ur"""
(?s) # make . also match newlines
.*? # anything, but match non-greedy
[%s] # the puntuation for sentence ending
\s+ # the spacing after the puntuation
(?=[^a-zά-ώ\d]) # lookahead that next part starts with caps
""" % sentenceend, re.VERBOSE | re.UNICODE
    )

    # Punctuation conversion applied when localising into Greek:
    # '?' becomes the Greek question mark ';' and ';' the ano teleia '·'.
    puncdict = {
        u"?": u";",
        u";": u"·",
    }

    # Valid latin characters for use as accelerators
    valid_latin_accel = u"abcdefghijklmnopqrstuvwxyz" + \
                        u"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + \
                        u"1234567890"

    # Valid greek characters for use as accelerators (accented characters
    # and "ς" omitted)
    valid_greek_accel = u"αβγδεζηθικλμνξοπρστυφχψω" + \
                        u"ΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩ"

    # Valid accelerators
    validaccel = u"".join([valid_latin_accel, valid_greek_accel])
| bsd-3-clause |
eayunstack/ceilometer | ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py | 9 | 2781 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint
import sqlalchemy as sa
from ceilometer.storage.sqlalchemy import migration
# All tables this migration touches; the subset in DROP_TABLES is removed
# once the source association has been folded into the sample table.
TABLES = ['sample', 'resource', 'source', 'sourceassoc']
DROP_TABLES = ['resource', 'source', 'sourceassoc']

# Foreign keys per table as (column, referenced_table, referenced_column).
INDEXES = {
    "sample": (('resource_id', 'resource', 'id'),),
    "sourceassoc": (('sample_id', 'sample', 'id'),
                    ('resource_id', 'resource', 'id'),
                    ('source_id', 'source', 'id'))
}


def upgrade(migrate_engine):
    """Drop source/sourceassoc/resource tables, inlining source_id into sample."""
    meta = sa.MetaData(bind=migrate_engine)
    load_tables = dict((table_name, sa.Table(table_name, meta,
                                             autoload=True))
                       for table_name in TABLES)

    # drop foreign keys (sqlite has no usable FK constraint support here)
    if migrate_engine.name != 'sqlite':
        for table_name, indexes in INDEXES.items():
            table = load_tables[table_name]
            for column, ref_table_name, ref_column_name in indexes:
                ref_table = load_tables[ref_table_name]
                params = {'columns': [table.c[column]],
                          'refcolumns': [ref_table.c[ref_column_name]]}
                fk_table_name = table_name
                if migrate_engine.name == "mysql":
                    # MySQL needs the constraint name to drop it.
                    params['name'] = "_".join(('fk', fk_table_name, column))
                elif (migrate_engine.name == "postgresql" and
                      table_name == 'sample'):
                    # fk was not renamed in script 030
                    params['name'] = "_".join(('meter', column, 'fkey'))
                fkey = ForeignKeyConstraint(**params)
                fkey.drop()

    # create source field in sample
    sample = load_tables['sample']
    sample.create_column(sa.Column('source_id', sa.String(255)))

    # move source values to samples (paged to bound memory on large tables)
    sourceassoc = load_tables['sourceassoc']
    query = (sa.select([sourceassoc.c.sample_id, sourceassoc.c.source_id]).
             where(sourceassoc.c.sample_id.isnot(None)))
    for sample_id, source_id in migration.paged(query):
        (sample.update().where(sample_id == sample.c.id).
         values({'source_id': source_id}).execute())

    # drop tables
    for table_name in DROP_TABLES:
        sa.Table(table_name, meta, autoload=True).drop()
| apache-2.0 |
Pluto-tv/chromium-crosswalk | tools/telemetry/third_party/gsutilz/third_party/boto/tests/unit/ses/test_identity.py | 100 | 7581 | #!/usr/bin/env python
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.jsonresponse import ListElement
from boto.ses.connection import SESConnection
class TestSESIdentity(AWSMockServiceTestCase):
    # Exercises SESConnection.get_identity_dkim_attributes against a canned
    # XML response containing one verified and one unverified identity.
    connection_class = SESConnection

    def setUp(self):
        super(TestSESIdentity, self).setUp()

    def default_body(self):
        # Canned GetIdentityDkimAttributes response body served by the mock.
        return b"""<GetIdentityDkimAttributesResponse \
xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
<GetIdentityDkimAttributesResult>
<DkimAttributes>
<entry>
<key>test@amazon.com</key>
<value>
<DkimEnabled>true</DkimEnabled>
<DkimVerificationStatus>Success</DkimVerificationStatus>
<DkimTokens>
<member>vvjuipp74whm76gqoni7qmwwn4w4qusjiainivf6f</member>
<member>3frqe7jn4obpuxjpwpolz6ipb3k5nvt2nhjpik2oy</member>
<member>wrqplteh7oodxnad7hsl4mixg2uavzneazxv5sxi2</member>
</DkimTokens>
</value>
</entry>
<entry>
<key>secondtest@amazon.com</key>
<value>
<DkimEnabled>false</DkimEnabled>
<DkimVerificationStatus>NotStarted</DkimVerificationStatus>
</value>
</entry>
</DkimAttributes>
</GetIdentityDkimAttributesResult>
<ResponseMetadata>
<RequestId>bb5a105d-c468-11e1-82eb-dff885ccc06a</RequestId>
</ResponseMetadata>
</GetIdentityDkimAttributesResponse>"""

    def test_ses_get_identity_dkim_list(self):
        self.set_http_response(status_code=200)

        response = self.service_connection\
            .get_identity_dkim_attributes(['test@amazon.com', 'secondtest@amazon.com'])

        response = response['GetIdentityDkimAttributesResponse']
        result = response['GetIdentityDkimAttributesResult']

        # First entry: verified identity with three DKIM tokens.
        first_entry = result['DkimAttributes'][0]
        entry_key = first_entry['key']
        attributes = first_entry['value']
        tokens = attributes['DkimTokens']

        self.assertEqual(entry_key, 'test@amazon.com')
        self.assertEqual(ListElement, type(tokens))
        self.assertEqual(3, len(tokens))
        self.assertEqual('vvjuipp74whm76gqoni7qmwwn4w4qusjiainivf6f',
                         tokens[0])
        self.assertEqual('3frqe7jn4obpuxjpwpolz6ipb3k5nvt2nhjpik2oy',
                         tokens[1])
        self.assertEqual('wrqplteh7oodxnad7hsl4mixg2uavzneazxv5sxi2',
                         tokens[2])

        # Second entry: unverified identity, no token list present.
        second_entry = result['DkimAttributes'][1]
        entry_key = second_entry['key']
        attributes = second_entry['value']

        dkim_enabled = attributes['DkimEnabled']
        dkim_verification_status = attributes['DkimVerificationStatus']

        self.assertEqual(entry_key, 'secondtest@amazon.com')
        self.assertEqual(dkim_enabled, 'false')
        self.assertEqual(dkim_verification_status, 'NotStarted')
class TestSESSetIdentityNotificationTopic(AWSMockServiceTestCase):
    # Exercises SESConnection.set_identity_notification_topic for the
    # 'Bounce' and 'Complaint' notification types against a canned response.
    connection_class = SESConnection

    def setUp(self):
        super(TestSESSetIdentityNotificationTopic, self).setUp()

    def default_body(self):
        # Canned SetIdentityNotificationTopic response (empty result element).
        return b"""<SetIdentityNotificationTopicResponse \
xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
<SetIdentityNotificationTopicResult/>
<ResponseMetadata>
<RequestId>299f4af4-b72a-11e1-901f-1fbd90e8104f</RequestId>
</ResponseMetadata>
</SetIdentityNotificationTopicResponse>"""

    def test_ses_set_identity_notification_topic_bounce(self):
        self.set_http_response(status_code=200)

        response = self.service_connection\
            .set_identity_notification_topic(
                identity='user@example.com',
                notification_type='Bounce',
                sns_topic='arn:aws:sns:us-east-1:123456789012:example')

        response = response['SetIdentityNotificationTopicResponse']
        result = response['SetIdentityNotificationTopicResult']

        self.assertEqual(2, len(response))
        self.assertEqual(0, len(result))

    def test_ses_set_identity_notification_topic_complaint(self):
        self.set_http_response(status_code=200)

        response = self.service_connection\
            .set_identity_notification_topic(
                identity='user@example.com',
                notification_type='Complaint',
                sns_topic='arn:aws:sns:us-east-1:123456789012:example')

        response = response['SetIdentityNotificationTopicResponse']
        result = response['SetIdentityNotificationTopicResult']

        self.assertEqual(2, len(response))
        self.assertEqual(0, len(result))
class TestSESSetIdentityFeedbackForwardingEnabled(AWSMockServiceTestCase):
    # Exercises SESConnection.set_identity_feedback_forwarding_enabled with
    # both True and False against a canned response.
    connection_class = SESConnection

    def setUp(self):
        super(TestSESSetIdentityFeedbackForwardingEnabled, self).setUp()

    def default_body(self):
        # Canned SetIdentityFeedbackForwardingEnabled response (empty result).
        return b"""<SetIdentityFeedbackForwardingEnabledResponse \
xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
<SetIdentityFeedbackForwardingEnabledResult/>
<ResponseMetadata>
<RequestId>299f4af4-b72a-11e1-901f-1fbd90e8104f</RequestId>
</ResponseMetadata>
</SetIdentityFeedbackForwardingEnabledResponse>"""

    def test_ses_set_identity_feedback_forwarding_enabled_true(self):
        self.set_http_response(status_code=200)

        response = self.service_connection\
            .set_identity_feedback_forwarding_enabled(
                identity='user@example.com',
                forwarding_enabled=True)

        response = response['SetIdentityFeedbackForwardingEnabledResponse']
        result = response['SetIdentityFeedbackForwardingEnabledResult']

        self.assertEqual(2, len(response))
        self.assertEqual(0, len(result))

    def test_ses_set_identity_notification_topic_enabled_false(self):
        self.set_http_response(status_code=200)

        response = self.service_connection\
            .set_identity_feedback_forwarding_enabled(
                identity='user@example.com',
                forwarding_enabled=False)

        response = response['SetIdentityFeedbackForwardingEnabledResponse']
        result = response['SetIdentityFeedbackForwardingEnabledResult']

        self.assertEqual(2, len(response))
        self.assertEqual(0, len(result))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
atupone/xbmc | lib/libUPnP/Neptune/Extras/Tools/Logging/NeptuneLogConsoleMulticast.py | 202 | 3159 | #!/usr/bin/env python
from struct import *
from socket import *
from optparse import OptionParser
# Defaults for the multicast log listener.
UDP_ADDR = "0.0.0.0"                     # local bind address (all interfaces)
UDP_MULTICAST_ADDR = "239.255.255.100"   # multicast group joined for log datagrams
UDP_PORT = 7724                          # default listen port
BUFFER_SIZE = 65536                      # max datagram size accepted

#HEADER_KEYS = ['Logger', 'Level', 'Source-File', 'Source-Function', 'Source-Line', 'TimeStamp']
# Header fields displayed per output format.
# NOTE(review): 'mini' maps to the plain string ('Level'), not a 1-tuple, so
# membership tests against it match substrings of "Level" — verify intent.
HEADER_KEYS = {
    'mini': ('Level'),
    'standard': ('Logger', 'Level', 'Source-Function'),
    'long': ('Logger', 'Level', 'Source-File', 'Source-Line', 'Source-Function'),
    'all': ('Logger', 'Level', 'Source-File', 'Source-Line', 'Source-Function', 'TimeStamp'),
    'custom': ()
}

# Maps sender addresses to their assigned display index.
Senders = {}
class LogRecord:
    # One parsed log datagram: "Key: Value" header lines separated by CRLF,
    # a blank line, then a free-form body.

    def __init__(self, data):
        self.headers = {}
        consumed = 0
        for line in data.split("\r\n"):
            # Account for the line plus its CRLF terminator.
            consumed += len(line) + 2
            if ':' not in line:
                # First non-header line (normally blank) ends the header block.
                break
            name, value = line.split(":", 1)
            self.headers[name] = value.strip()
        self.body = data[consumed:]

    def __getitem__(self, index):
        return self.headers[index]

    def format(self, sender_index, keys):
        # Render as one display line; which header fields appear is driven
        # by membership tests against `keys`.
        parts = ['[' + str(sender_index) + ']']
        if 'Level' in keys:
            parts.append('[' + self.headers['Level'] + ']')
        if 'Logger' in keys:
            parts.append(self.headers['Logger'])
        if 'TimeStamp' in keys:
            parts.append(self.headers['TimeStamp'])
        if 'Source-File' in keys:
            if 'Source-Line' in keys:
                parts.append(self.headers['Source-File'] + ':' + self.headers['Source-Line'])
            else:
                parts.append(self.headers['Source-File'])
        if 'TimeStamp' in keys:
            # Mirrors the original behavior: TimeStamp is emitted a second
            # time at this position when requested.
            parts.append(self.headers['TimeStamp'])
        if 'Source-Function' in keys:
            parts.append(self.headers['Source-Function'])
        parts.append(self.body)
        return ' '.join(parts)
class Listener:
    # Receives multicast log datagrams and prints each parsed record,
    # tagging every distinct sender address with a small integer index.

    def __init__(self, format='standard', port=UDP_PORT):
        self.socket = socket(AF_INET,SOCK_DGRAM)
        self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        # Join the multicast group on all interfaces.
        mreq = pack("4sl", inet_aton(UDP_MULTICAST_ADDR), INADDR_ANY)
        self.socket.setsockopt(IPPROTO_IP, IP_ADD_MEMBERSHIP, mreq)
        self.socket.bind((UDP_ADDR, port))
        # Header fields to display, selected by the chosen format name.
        self.format_keys = HEADER_KEYS[format]

    def listen(self):
        # Blocking receive loop; runs until the process is interrupted.
        while True:
            data,addr = self.socket.recvfrom(BUFFER_SIZE)
            # Assign the next index to unseen senders, reuse known ones.
            sender_index = len(Senders.keys())
            if addr in Senders:
                sender_index = Senders[addr]
            else:
                print "### NEW SENDER:", addr
                Senders[addr] = sender_index
            record = LogRecord(data)
            print record.format(sender_index, self.format_keys)
### main
# Parse command-line options, then run the blocking listener loop.
parser = OptionParser(usage="%prog [options]")
parser.add_option("-p", "--port", dest="port", help="port number to listen on", type="int", default=UDP_PORT)
parser.add_option("-f", "--format", dest="format", help="log format (mini, standard, long, or all)", choices=('mini', 'standard', 'long', 'all'), default='standard')

(options, args) = parser.parse_args()

print "Listening on port", options.port
l = Listener(format=options.format, port=options.port)
l.listen()
| gpl-2.0 |
thnee/ansible | test/units/modules/network/f5/test_bigip_monitor_https.py | 22 | 14164 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_monitor_https import Parameters
from library.modules.bigip_monitor_https import ModuleManager
from library.modules.bigip_monitor_https import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_monitor_https import Parameters
from ansible.modules.network.f5.bigip_monitor_https import ModuleManager
from ansible.modules.network.f5.bigip_monitor_https import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
# Directory holding canned response fixtures, plus a per-path memo cache.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Read a fixture file, parse it as JSON when possible, and memoize.

    Non-JSON fixtures are returned (and cached) as their raw text.
    """
    path = os.path.join(fixture_path, name)
    if path not in fixture_data:
        with open(path) as handle:
            raw = handle.read()
        try:
            parsed = json.loads(raw)
        except Exception:
            # Deliberate best-effort: fall back to the raw text.
            parsed = raw
        fixture_data[path] = parsed
    return fixture_data[path]
class TestParameters(unittest.TestCase):
    """Verify that Parameters normalizes both Ansible module arguments and
    raw REST API payloads to the same canonical attribute values."""

    def _assert_canonical(self, p):
        # Every fixture in this class is expected to normalize to this
        # single canonical monitor profile.
        assert p.name == 'foo'
        assert p.parent == '/Common/parent'
        assert p.send == 'this is a send string'
        assert p.receive == 'this is a receive string'
        assert p.ip == '10.10.10.10'
        assert p.type == 'https'
        assert p.port == 80
        assert p.destination == '10.10.10.10:80'
        assert p.interval == 20
        assert p.timeout == 30
        assert p.time_until_up == 60

    def test_module_parameters(self):
        args = dict(
            name='foo',
            parent='parent',
            send='this is a send string',
            receive='this is a receive string',
            ip='10.10.10.10',
            port=80,
            interval=20,
            timeout=30,
            time_until_up=60,
            partition='Common'
        )
        self._assert_canonical(Parameters(params=args))

    def test_module_parameters_ints_as_strings(self):
        # Same profile, but every numeric option is supplied as a string;
        # Parameters must coerce them back to integers.
        args = dict(
            name='foo',
            parent='parent',
            send='this is a send string',
            receive='this is a receive string',
            ip='10.10.10.10',
            port='80',
            interval='20',
            timeout='30',
            time_until_up='60',
            partition='Common'
        )
        self._assert_canonical(Parameters(params=args))

    def test_api_parameters(self):
        # The REST API spelling of the same monitor (defaultsFrom, recv,
        # destination, timeUntilUp) must map onto identical attributes.
        args = dict(
            name='foo',
            defaultsFrom='/Common/parent',
            send='this is a send string',
            recv='this is a receive string',
            destination='10.10.10.10:80',
            interval=20,
            timeout=30,
            timeUntilUp=60
        )
        self._assert_canonical(Parameters(params=args))
class TestManager(unittest.TestCase):
    """Exercise ModuleManager.exec_module with all BIG-IP I/O mocked out.

    The boilerplate previously repeated in every test (partition/provider
    arguments, AnsibleModule construction, mock wiring against the canned
    fixture) lives in the private helpers below; each test now states only
    what is specific to it.  Test method names and behavior are unchanged.
    """

    def setUp(self):
        self.spec = ArgumentSpec()

    def _make_module(self, **params):
        # Build an AnsibleModule from *params*, supplying the partition and
        # connection provider arguments shared by every test case.
        params.setdefault('partition', 'Common')
        params.setdefault(
            'provider',
            dict(server='localhost', password='password', user='admin')
        )
        set_module_args(params)
        return AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

    def _update_manager(self, module):
        # Manager wired as if the monitor already exists on the device:
        # reads come from the canned fixture and device writes are swallowed.
        current = Parameters(params=load_fixture('load_ltm_monitor_https.json'))
        mm = ModuleManager(module=module)
        mm.exists = Mock(return_value=True)
        mm.read_current_from_device = Mock(return_value=current)
        mm.update_on_device = Mock(return_value=True)
        return mm

    def test_create_monitor(self, *args):
        module = self._make_module(
            name='foo',
            parent='parent',
            send='this is a send string',
            receive='this is a receive string',
            ip='10.10.10.10',
            port=80,
            interval=20,
            timeout=30,
            time_until_up=60,
        )
        mm = ModuleManager(module=module)
        # First existence probe reports "absent"; the post-create probe
        # reports "present".
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)
        results = mm.exec_module()
        assert results['changed'] is True
        assert results['parent'] == '/Common/parent'

    def test_create_monitor_idempotent(self, *args):
        # Arguments exactly matching the fixture must report no change.
        module = self._make_module(
            name='asdf',
            parent='https',
            send='GET /\\r\\n',
            receive='hello world',
            ip='1.1.1.1',
            port=389,
            interval=5,
            timeout=16,
            time_until_up=0,
        )
        mm = self._update_manager(module)
        results = mm.exec_module()
        assert results['changed'] is False

    def test_update_port(self, *args):
        module = self._make_module(name='asdf', port=800)
        mm = self._update_manager(module)
        results = mm.exec_module()
        assert results['changed'] is True
        assert results['port'] == 800

    def test_update_interval(self, *args):
        module = self._make_module(name='foo', interval=10)
        mm = self._update_manager(module)
        results = mm.exec_module()
        assert results['changed'] is True
        assert results['interval'] == 10

    def test_update_interval_larger_than_existing_timeout(self, *args):
        # interval=30 exceeds the fixture's timeout and must be rejected.
        module = self._make_module(name='asdf', interval=30)
        mm = self._update_manager(module)
        with pytest.raises(F5ModuleError) as ex:
            mm.exec_module()
        assert "must be less than" in str(ex.value)

    def test_update_interval_larger_than_new_timeout(self, *args):
        # interval=10 exceeds the newly supplied timeout=5.
        module = self._make_module(name='asdf', interval=10, timeout=5)
        mm = self._update_manager(module)
        with pytest.raises(F5ModuleError) as ex:
            mm.exec_module()
        assert "must be less than" in str(ex.value)

    def test_update_send(self, *args):
        module = self._make_module(name='asdf', send='this is another send string')
        mm = self._update_manager(module)
        results = mm.exec_module()
        assert results['changed'] is True
        assert results['send'] == 'this is another send string'

    def test_update_receive(self, *args):
        module = self._make_module(name='asdf', receive='this is another receive string')
        mm = self._update_manager(module)
        results = mm.exec_module()
        assert results['changed'] is True
        assert results['receive'] == 'this is another receive string'

    def test_update_timeout(self, *args):
        module = self._make_module(name='asdf', timeout=300)
        mm = self._update_manager(module)
        results = mm.exec_module()
        assert results['changed'] is True
        assert results['timeout'] == 300

    def test_update_time_until_up(self, *args):
        module = self._make_module(name='asdf', time_until_up=300)
        mm = self._update_manager(module)
        results = mm.exec_module()
        assert results['changed'] is True
        assert results['time_until_up'] == 300
| gpl-3.0 |
tafaRU/account-financial-tools | __unported__/account_credit_control_dunning_fees/model/line.py | 9 | 1155 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class credit_control_line(orm.Model):
    """Add a ``dunning_fees_amount`` column to ``credit.control.line``."""
    # Extend the existing model in place rather than creating a new table.
    _inherit = "credit.control.line"
    # Monetary amount of the dunning fees attached to this control line.
    _columns = {'dunning_fees_amount': fields.float('Fees')}
| agpl-3.0 |
happykhan/mlst_comp | src/main.py | 1 | 16402 | #!/usr/bin/env python
# Copyright (C) 2016. Nabil-Fared Alikhan
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
I don't really know, but it compares MLST Methods
I still don't really know, but it compares MLST Methods
### CHANGE LOG ###
2016-11-22 <Nabil-Fareed Alikhan> <n-f.alikhan@warwick.ac.uk>
* Initial build
"""
import sys, os, traceback, argparse
import time
import __init__ as meta
from docker import Client
import logging
import urllib2
import re
from subprocess import Popen, PIPE
import shutil
import gzip
from os.path import expanduser
epi = "Licence: "+ meta.__licence__ + " by " +meta.__author__ + " <" +meta.__author_email__ + ">"
def run_method(args):
    """Run the selected MLST caller(s) over every record in the dataset.

    Reads the tab-separated dataset, downloads read pairs from the ENA FTP
    mirror when no download location is recorded, invokes the chosen
    caller(s) for records that do not yet have an ST (or for all records
    when ``--clean`` is given), rewrites the output table after each record
    and refreshes the HTML runtime graphs every five records.
    """
    data_dict = {}
    method = args.mlst_method
    output_file = args.output
    # Open dataset file
    if not output_file:
        output_file = '%s.updated.tsv' %args.dataset
    with open(args.dataset) as data_f:
        # First line is the tab-separated header row.
        headers = data_f.readline().strip().split('\t')
        # For each record,
        for record in data_f.readlines():
            vals = []
            for val in record.split('\t'):
                vals.append(val.strip())
            record_dict = dict(zip(headers, vals))
            # Records are keyed by their SRA run accession.
            data_dict[record_dict['SRR_Acc_code']] = record_dict
    for idx, record_name in enumerate(data_dict):
        record_dict = data_dict[record_name]
        # Check if ST if already called:
        method_list = [method]
        if method == 'all':
            method_list = ['stringMLST', 'MOST', 'Ariba']
        existing = [method_name for method_name in method_list if record_dict.get('%s_ST' %method_name)]
        if len(existing) != len(method_list) or args.clean:
            # Fetch reads
            for read_pair in ['1', '2']:
                if not record_dict.get('Download_loc_%s' %read_pair):
                    # Build the ENA FTP URL from the accession.  Accessions
                    # longer than 9 characters get an extra zero-padded
                    # sub-directory level.
                    # NOTE(review): acc[pos] with a negative pos takes a
                    # single character; the usual ENA layout pads the whole
                    # suffix (acc[9:].zfill(3)) -- verify for 11+ char
                    # accessions.
                    acc = record_dict.get('SRR_Acc_code')
                    if len(acc) > 9:
                        pos = 9 - len(acc)
                        acc_fill = acc[pos].zfill(3)
                        record_dict['Download_loc_%s' %read_pair] = 'ftp://ftp.sra.ebi.ac.uk/vol1/fastq/%s/%s/%s/%s_%s.fastq.gz' %(acc[0:6],
                                                                                                                                  acc_fill,
                                                                                                                                  acc,
                                                                                                                                  acc,
                                                                                                                                  read_pair)
                    else:
                        record_dict['Download_loc_%s' %read_pair] = 'ftp://ftp.sra.ebi.ac.uk/vol1/fastq/%s/%s/%s_%s.fastq.gz' %(acc[0:6],
                                                                                                                               acc,
                                                                                                                               acc,
                                                                                                                               read_pair)
            # clean=True on the first mate triggers temp-dir housekeeping.
            read_loc_1 = _get_reads(record_dict['Download_loc_1'], clean = True)
            read_loc_2 = _get_reads(record_dict['Download_loc_2'])
            if not read_loc_1 or not read_loc_2:
                logger.error(' Could not fetch reads from %s ' %record_dict['Download_loc_1'])
                continue
            # Run method, return ST and running time
            if method == 'stringMLST' or method == 'all':
                ST = None
                runtime = None
                if not record_dict.get('stringMLST_ST') or args.clean:
                    ST, runtime = run_string_mlst(read_loc_1, read_loc_2)
                if ST:
                    record_dict['stringMLST_ST'] = ST
                if runtime:
                    record_dict['stringMLST_runtime'] = runtime
            if method == 'MOST' or method == 'all':
                ST = None
                runtime = None
                if not record_dict.get('MOST_ST') or args.clean:
                    ST, runtime = run_most(read_loc_1, read_loc_2)
                if ST:
                    record_dict['MOST_ST'] = ST
                if runtime:
                    record_dict['MOST_runtime'] = runtime
            if method == 'Ariba' or method == 'all':
                ST = None
                runtime = None
                if not record_dict.get('Ariba_ST') or args.clean:
                    ST, runtime = run_ariba(read_loc_1, read_loc_2)
                if ST:
                    record_dict['Ariba_ST'] = ST
                if runtime:
                    record_dict['Ariba_runtime'] = runtime
        data_dict[record_dict['SRR_Acc_code']] = record_dict
        # Persist progress after every record so a crash loses little work.
        _write_output(output_file, data_dict)
        if idx % 5 == 0 or idx == (len(data_dict)-1):
            _draw_graph(output_file, args.html)
def _draw_graph(data_file, html_dir):
    """Render a box plot of per-caller runtimes from *data_file* into
    ``<html_dir>/<dataset name>/<dataset name>-runtime.html``.
    """
    # bokeh/pandas are imported locally: only needed when a graph is drawn.
    from bokeh.charts import BoxPlot, output_file, save
    from pandas import read_csv, DataFrame
    df = read_csv(data_file, sep='\t')
    df = df.set_index('SRR_Acc_code')
    # One '<caller>_runtime' column exists per MLST caller that has run.
    labels = [str(col) for col in df.columns if col.endswith('_runtime')]
    df_perf = DataFrame()
    for label in labels:
        # Reshape to long format: one (Seconds, caller name) row per record.
        x = DataFrame(zip(df[label], [label.split('_')[0]] * len(df[label])), columns= ['Seconds', 'MLST Caller'])
        df_perf = df_perf.append(x)
    output_name = os.path.splitext(os.path.basename(data_file))[0]
    output_dir = os.path.join(html_dir, output_name)
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    p = BoxPlot(df_perf, values='Seconds',label='MLST Caller', title="Runtime for MLST callers")
    output_file(os.path.join(output_dir, '%s-runtime.html' %output_name))
    save(p)
def _write_output(output_file, data_list):
    """Write *data_list* (mapping of record name -> field dict) as a TSV file.

    The header row is the sorted union of every record's keys, so fields
    that appear only in later records (e.g. an ST/runtime column computed
    part-way through a run) are no longer silently dropped, as they were
    when the header was taken from the first record alone.  Missing values
    are written as empty strings.  An empty *data_list* produces an empty
    file.
    """
    # Collect the union of all field names across every record.
    all_keys = set()
    for record in data_list.values():
        all_keys.update(record)
    header_list = sorted(all_keys)
    with open(output_file, 'w') as output_f:
        if header_list:
            output_f.write('%s\n' % '\t'.join(header_list))
        for record in data_list.values():
            row_list = [record.get(header, '') for header in header_list]
            output_f.write('%s\n' % '\t'.join(row_list))
def run_string_mlst(read_file_1, read_file_2):
    '''Runs stringMLST on given read set,
    returns ST and runtime as a string.

    Builds the stringMLST docker image from the upstream Dockerfile, runs
    it over the paired reads (mounted read-only paths under /reads) and
    parses the ST from the last tab-separated field of stdout.  Returns
    (None, None) when the image could not be built.
    '''
    # Talk to the local docker daemon over its TCP socket.
    cli = Client(base_url='tcp://127.0.0.1:2375')
    logger.debug('Building StringMLST container...')
    url = 'https://raw.githubusercontent.com/andrewjpage/docker_mlst/master/stringMLST/Dockerfile'
    response = urllib2.urlopen(urllib2.Request(url))
    docker_file_path = os.path.abspath(os.path.join('../temp', 'StringMLST-Dockerfile'))
    with open(docker_file_path, 'w') as docker_f:
        docker_f.write(response.read())
    response = [line for line in cli.build(fileobj=open(docker_file_path))]
    build_hash = None
    for line in response:
        # The build stream reports the image id on success.
        regex = re.search('Successfully built (\w+)', line )
        if regex:
            build_hash = regex.group(1)
    logger.debug('Built StringMLST container successfully : %s' %build_hash)
    if build_hash:
        logger.debug('Running StringMLST on %s' %os.path.basename(read_file_1))
        p = Popen(['sudo',
                   'docker', 'run', '--rm', '-v', '%s:/reads' %os.path.dirname(read_file_1),
                   build_hash, 'stringMLST.py', '--predict', '-1', '/reads/%s' %os.path.basename(read_file_1),
                   '-2', '/reads/%s' %os.path.basename(read_file_2), '-p', '-k', '35',
                   '-P','/stringMLST/SE'],
                  stdout=PIPE)
        # Wall-clock time from launch until stdout is exhausted.
        start_time = time.time()
        out = p.stdout.read()
        runtime = time.time() - start_time
        ST = out.strip().split('\t')[-1]
        logger.debug('%s: ST %s in %s seconds ' %(os.path.basename(read_file_1),ST,runtime))
        return str(ST), str(round(runtime,3))
    return None, None
def run_most(read_file_1, read_file_2):
    '''Runs MOST on given read set,
    returns ST and runtime as a string.

    Builds the MOST docker image from the upstream Dockerfile, runs it over
    the paired reads and parses the ST from the generated
    ``*_MLST_result.csv``.  Returns (None, runtime) when MOST produced no
    result file, and (None, None) when the image could not be built.
    '''
    cli = Client(base_url='tcp://127.0.0.1:2375')
    logger.debug('Building MOST container...')
    url = 'https://raw.githubusercontent.com/andrewjpage/docker_mlst/master/MOST/Dockerfile'
    response = urllib2.urlopen(urllib2.Request(url))
    docker_file_path = os.path.abspath(os.path.join('../temp', 'MOST-Dockerfile'))
    with open(docker_file_path, 'w') as docker_f:
        docker_f.write(response.read())
    response = [line for line in cli.build(fileobj=open(docker_file_path))]
    build_hash = None
    for line in response:
        regex = re.search('Successfully built (\w+)', line )
        if regex:
            build_hash = regex.group(1)
    logger.debug('Built MOST container successfully : %s' %build_hash)
    if build_hash:
        logger.debug('Running MOST on %s' %os.path.basename(read_file_1))
        # Remove output from a previous run; the container writes into the
        # mounted ../temp directory as root, hence the sudo rm.
        if os.path.exists('../temp/MOST-temp'):
            Popen(['sudo', 'rm', '-rf', '../temp/MOST-temp']).wait()
        p = Popen(['sudo',
                   'docker', 'run', '--rm', '-v', '%s:/reads' %os.path.dirname(read_file_1),
                   build_hash, 'MOST.py', '-1', '/reads/%s' %os.path.basename(read_file_1),
                   '-2', '/reads/%s' %os.path.basename(read_file_2),
                   '-o', '/reads/MOST-temp',
                   '-st','/MOST/MLST_data/salmonella'],
                  stdout=PIPE)
        start_time = time.time()
        out = p.stdout.read()
        runtime = time.time() - start_time
        ST = None
        MOST_output = '../temp/MOST-temp/%s_MLST_result.csv' %os.path.basename(read_file_1).split('.gz')[0]
        if not os.path.exists(MOST_output):
            return None, str(round(runtime,3))
        with open(MOST_output) as most_f:
            for line in most_f.readlines():
                # e.g. "st value:,42" -- keep the last match in the file.
                regex = re.search('st value:[,*]*([0-9]+)', line)
                if regex:
                    ST = regex.group(1)
        logger.debug('%s: ST %s in %s seconds '
                     %(os.path.basename(read_file_1),ST,runtime))
        return str(ST), str(round(runtime,3))
    return None, None
def run_ariba(read_file_1, read_file_2):
    '''Runs Ariba on given read set,
    returns ST and runtime as a string.

    Builds the Ariba docker image, runs it over the paired reads, parses
    the per-locus allele calls from report.tsv and looks the resulting
    allelic profile up in the bundled Salmonella profile table.  Returns
    (None, None) when the image could not be built.
    '''
    cli = Client(base_url='tcp://127.0.0.1:2375')
    logger.debug('Building Ariba container...')
    url = 'https://raw.githubusercontent.com/andrewjpage/docker_mlst/master/ariba/Dockerfile'
    response = urllib2.urlopen(urllib2.Request(url))
    docker_file_path = os.path.abspath(os.path.join('../temp', 'ariba-Dockerfile'))
    with open(docker_file_path, 'w') as docker_f:
        docker_f.write(response.read())
    response = [line for line in cli.build(fileobj=open(docker_file_path))]
    build_hash = None
    for line in response:
        regex = re.search('Successfully built (\w+)', line )
        if regex:
            build_hash = regex.group(1)
    logger.debug('Built Ariba container successfully : %s' %build_hash)
    if build_hash:
        logger.debug('Running Ariba on %s' %os.path.basename(read_file_1))
        # Clear output from a previous run (written as root by the container).
        if os.path.exists('../temp/ariba-temp'):
            Popen(['sudo', 'rm', '-rf', '../temp/ariba-temp']).wait()
        p = Popen(['sudo',
                   'docker', 'run', '--rm', '-v', '%s:/reads' %os.path.dirname(read_file_1), \
                   build_hash, 'ariba', 'run', '/salmonella_db/ref_db', \
                   '/reads/%s' %os.path.basename(read_file_1), \
                   '/reads/%s' %os.path.basename(read_file_2),\
                   '/reads/ariba-temp'],\
                  stdout=PIPE)
        start_time = time.time()
        out = p.stdout.read()
        runtime = time.time() - start_time
        # Column 1 of report.tsv is "<locus>.<allele>"; build locus->allele.
        allele_prof = {}
        with open('../temp/ariba-temp/report.tsv') as ariba_f:
            for line in ariba_f.readlines()[1:]:
                out = line.split('\t')[0].split('.')
                allele_prof[out[0]] = out[1]
        # Scan the profile table for the ST whose 7 loci all match.
        with gzip.open('../datasets/Salmonella.UoW.profiles.list.gz', 'rb') as st_f:
            headers = st_f.readline().strip().split('\t')
            ST = None
            for line in st_f.readlines():
                st_prof = dict(zip(headers, line.strip().split('\t')))
                match = 0
                for locus in allele_prof.keys():
                    if allele_prof[locus] == st_prof[locus]:
                        match += 1
                if match == 7:
                    ST = st_prof['ST']
                    break
        logger.debug('%s: ST %s in %s seconds '
                     %(os.path.basename(read_file_1),ST,runtime))
        return str(ST), str(round(runtime,3))
    return None, None
def _get_reads(url, clean=False):
    """Download *url* into ../temp (unless already cached) and return its
    absolute path, or None when the download fails.

    When *clean* is True and more than 21 ``.gz`` files have accumulated in
    ../temp, the cache is emptied first to bound disk usage.

    Bug fix: a failed download previously left an empty/partial file
    behind, which every later call then returned as a valid cached read
    set.  The partial file is now removed before returning None.
    """
    if not os.path.exists('../temp'):
        os.mkdir('../temp')
    read_path = os.path.join('../temp', url.split('/')[-1])
    if not os.path.exists(read_path):
        if clean:
            num_files = len([name for name in os.listdir('../temp') if name.endswith('.gz')])
            if num_files > 21:
                logger.debug('Cleaning temp dir...')
                for name in os.listdir('../temp'):
                    if name.endswith('.gz'):
                        os.remove(os.path.join('../temp', name))
        logger.debug('Downloading read file %s...' % os.path.basename(read_path))
        try:
            response = urllib2.urlopen(urllib2.Request(url))
            with open(read_path, 'wb') as read_f:
                read_f.write(response.read())
        except Exception:
            # Do not leave a truncated download behind -- it would be
            # treated as a cache hit on the next call.
            if os.path.exists(read_path):
                os.remove(read_path)
            return None
    return os.path.abspath(read_path)
if __name__ == '__main__':
    try:
        start_time = time.time()
        # Second paragraph of the module docstring becomes the CLI summary.
        desc = __doc__.split('\n\n')[1].strip()
        logging.basicConfig(level=logging.INFO)
        logger = logging.getLogger(__name__)
        parser = argparse.ArgumentParser(description=desc,epilog=epi)
        parser.add_argument ('-v', '--verbose', action='store_true', default=False, help='verbose output')
        parser.add_argument('--version', action='version', version='%(prog)s ' + meta.__version__)
        parser.add_argument('-o','--output',action='store',help='output prefix')
        # Sub-command "run" dispatches to run_method via set_defaults(func=...).
        subparsers = parser.add_subparsers(help='commands')
        run_parser = subparsers.add_parser('run', help='Run MLST software over given dataset with Docker')
        run_parser.add_argument('dataset', action='store', help='File location of dataset')
        run_parser.add_argument('mlst_method', action='store', help='in silico typing MLST method', choices=['stringMLST', 'MOST', 'none', 'Ariba', 'all'])
        run_parser.add_argument('-o','--output', action='store', help='Output file location, Default: <dataset>.updated.tsv', default=None)
        run_parser.add_argument('-c', '--clean', action='store_true', help='Redo completed typing results', default=False)
        run_parser.add_argument('-t', '--html', action='store', help='HTML output folder', default=os.path.join(expanduser('~'), 'public_html'))
        run_parser.set_defaults(func=run_method)
        args = parser.parse_args()
        if args.verbose:
            print "Executing @ " + time.asctime()
            # Level 10 == logging.DEBUG.
            logger.setLevel(10)
        args.func(args)
        if args.verbose:
            print "Ended @ " + time.asctime()
        if args.verbose:
            print 'total time in minutes:',
        if args.verbose:
            print (time.time() - start_time) / 60.0
        sys.exit(0)
    except KeyboardInterrupt, e: # Ctrl-C
        raise e
    except SystemExit, e: # sys.exit()
        raise e
    except Exception, e:
        # Anything unexpected: report the traceback and exit non-zero
        # immediately (os._exit skips cleanup handlers).
        print 'ERROR, UNEXPECTED EXCEPTION'
        print str(e)
        traceback.print_exc()
        os._exit(1)
turbokongen/home-assistant | tests/components/notify/test_persistent_notification.py | 13 | 1032 | """The tests for the notify.persistent_notification service."""
from homeassistant.components import notify
import homeassistant.components.persistent_notification as pn
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
async def test_async_send_message(hass: HomeAssistant):
    """Sending through notify.persistent_notification creates one notification."""
    await async_setup_component(hass, pn.DOMAIN, {"core": {}})
    await async_setup_component(hass, notify.DOMAIN, {})
    await hass.async_block_till_done()
    await hass.services.async_call(
        notify.DOMAIN,
        notify.SERVICE_PERSISTENT_NOTIFICATION,
        {"message": "Hello", "title": "Test notification"},
    )
    await hass.async_block_till_done()
    notification_ids = hass.states.async_entity_ids(pn.DOMAIN)
    assert len(notification_ids) == 1
    notification = hass.states.get(notification_ids[0])
    assert notification.attributes.get("message") == "Hello"
    assert notification.attributes.get("title") == "Test notification"
| apache-2.0 |
Stefan210/masterthesis_nao_robot | nao_driver/scripts/send_camera_info.py | 3 | 1306 | #!/usr/bin/env python
import roslib
roslib.load_manifest('nao_driver')
import rospy
from sensor_msgs.msg import Image, CameraInfo
from sensor_msgs.srv import SetCameraInfo
def fill_set_camera_info():
    """Build a SetCameraInfo request pre-filled with a hard-coded projection
    matrix for the NAO top camera.

    NOTE(review): P is filled as a 12-element row-major 3x4 projection
    matrix; the values look like fx=640, cx=320, fy=373.31, cy=120 --
    confirm against the actual camera calibration.
    """
    cam_info = CameraInfo()
    cam_info.header.frame_id = '/CameraTop_frame'
    cam_info.header.stamp = rospy.Time.now()
    # Row 1 of P.
    cam_info.P[0] = 640.0
    cam_info.P[1] = 0.0
    cam_info.P[2] = 320.0
    cam_info.P[3] = 0
    # Row 2 of P.
    cam_info.P[4] = 0.0
    cam_info.P[5] = 373.31
    cam_info.P[6] = 120.0
    cam_info.P[7] = 0.0
    # Row 3 of P.
    cam_info.P[8] = 0.0
    cam_info.P[9] = 0.0
    cam_info.P[10] = 1.0
    cam_info.P[11] = 0.0
    # The service object is only used here as a container for the request's
    # camera_info field; the actual call happens in call_service().
    setCameraInfo = SetCameraInfo()
    setCameraInfo.camera_info = cam_info
    return setCameraInfo
def call_service():
    """Send the canned camera info to the set_camera_info service.

    Returns the service's success flag, or None (implicitly) if the call
    raised a ServiceException.
    """
    setCameraInfo = fill_set_camera_info()
    # Block until the service is advertised before creating the proxy.
    rospy.wait_for_service('set_camera_info')
    try:
        set_camera_info = rospy.ServiceProxy('set_camera_info', SetCameraInfo)
        print "proxy ready"
        response = set_camera_info(setCameraInfo.camera_info)
        print response.status_message
        return response.success
    except rospy.ServiceException, e:
        print "Service call failed: %s"%e
if __name__ == '__main__':
    rospy.init_node('set_camera_info')
    ret = call_service()
    print "Return status is: ", ret
    # NOTE(review): ret is the service's boolean success flag, so a
    # successful call (True) exits with status 1, which shells treat as
    # failure -- the exit code is probably inverted.
    exit(ret)
| bsd-3-clause |
cctaylor/googleads-python-lib | examples/dfp/v201502/user_service/get_all_roles.py | 3 | 1402 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all roles.
This sample can be used to determine which role id is needed when getting and
creating users."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
    """Fetch every role from the DFP UserService and print its id and name."""
    # Initialize appropriate service.
    user_service = client.GetService('UserService', version='v201502')
    # Get all roles.
    roles = user_service.getAllRoles()
    # Display results.
    for role in roles:
        print ('Role with id \'%s\' and name \'%s\' was found.'
           % (role['id'], role['name']))
    print '\nNumber of results found: %s' % len(roles)

if __name__ == '__main__':
    # Initialize client object.
    # Credentials/network settings come from the library's default storage
    # file (googleads.yaml in the user's home directory).
    dfp_client = dfp.DfpClient.LoadFromStorage()
    main(dfp_client)
| apache-2.0 |
ferabra/edx-platform | lms/djangoapps/edxnotes/helpers.py | 75 | 12610 | """
Helper methods related to EdxNotes.
"""
import json
import logging
import requests
from requests.exceptions import RequestException
from uuid import uuid4
from json import JSONEncoder
from datetime import datetime
from courseware.access import has_access
from courseware.views import get_current_child
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext as _
from capa.util import sanitize_html
from student.models import anonymous_id_for_user
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from util.date_utils import get_default_time_display
from dateutil.parser import parse as dateutil_parse
from provider.oauth2.models import AccessToken, Client
import oauth2_provider.oidc as oidc
from provider.utils import now
from opaque_keys.edx.keys import UsageKey
from .exceptions import EdxNotesParseError, EdxNotesServiceUnavailable
log = logging.getLogger(__name__)
HIGHLIGHT_TAG = "span"
HIGHLIGHT_CLASS = "note-highlight"
class NoteJSONEncoder(JSONEncoder):
    """
    JSON encoder that renders datetime values via the platform's default
    time display instead of failing to serialize them.
    """
    # pylint: disable=method-hidden
    def default(self, obj):
        # Delegate everything that is not a datetime to the stock encoder,
        # which raises TypeError for unserializable values.
        if not isinstance(obj, datetime):
            return super(NoteJSONEncoder, self).default(obj)
        return get_default_time_display(obj)
def get_id_token(user):
    """
    Generates JWT ID-Token, using or creating user's OAuth access token.

    Raises ImproperlyConfigured when the 'edx-notes' OAuth2 client row is
    missing from the database.
    NOTE(review): if several unexpired tokens exist for the same
    user/client pair, objects.get() raises MultipleObjectsReturned here --
    confirm a uniqueness guarantee upstream.
    """
    try:
        client = Client.objects.get(name="edx-notes")
    except Client.DoesNotExist:
        raise ImproperlyConfigured("OAuth2 Client with name 'edx-notes' is not present in the DB")
    try:
        # Reuse an unexpired access token for this user/client pair.
        access_token = AccessToken.objects.get(
            client=client,
            user=user,
            expires__gt=now()
        )
    except AccessToken.DoesNotExist:
        access_token = AccessToken(client=client, user=user)
        access_token.save()
    # Wrap the access token in an OIDC id-token and sign it with the
    # client's shared secret.
    id_token = oidc.id_token(access_token)
    secret = id_token.access_token.client.client_secret
    return id_token.encode(secret)
def get_token_url(course_id):
    """
    Return the URL of the token-retrieval endpoint for *course_id*.
    """
    kwargs = {"course_id": unicode(course_id)}
    return reverse("get_token", kwargs=kwargs)
def send_request(user, course_id, path="", query_string=None):
    """
    Sends a request with appropriate parameters and headers.

    Issues a GET to the internal EdxNotes endpoint *path*, identifying the
    user by their anonymous id and authenticating with a JWT id-token.
    When *query_string* is given, server-side highlighting of matches is
    requested.  Raises EdxNotesServiceUnavailable on any transport error.
    """
    url = get_internal_endpoint(path)
    params = {
        "user": anonymous_id_for_user(user, None),
        "course_id": unicode(course_id).encode("utf-8"),
    }
    if query_string:
        # Ask the service to wrap matches in our highlight markup.
        params.update({
            "text": query_string,
            "highlight": True,
            "highlight_tag": HIGHLIGHT_TAG,
            "highlight_class": HIGHLIGHT_CLASS,
        })
    try:
        response = requests.get(
            url,
            headers={
                "x-annotator-auth-token": get_id_token(user)
            },
            params=params
        )
    except RequestException:
        raise EdxNotesServiceUnavailable(_("EdxNotes Service is unavailable. Please try again in a few minutes."))
    return response
def get_parent_unit(xblock):
    """
    Walk up the ancestors of ``xblock`` and return the unit-level vertical:
    the ancestor whose own parent is a sequential.  Returns None when no
    such ancestor exists.
    """
    current = xblock.get_parent() if xblock else None
    while current is not None:
        container = current.get_parent()
        if container is None:
            return None
        if container.category == 'sequential':
            return current
        current = container
    return None
def preprocess_collection(user, course, collection):
    """
    Prepare `collection(notes_list)` provided by edx-notes-api
    for rendering in a template:
    add information about ancestor blocks,
    convert "updated" to date

    Notes whose module cannot be found, is inaccessible to the user, or
    has no unit/section/chapter ancestor are silently dropped.  Ancestor
    context is memoized in ``cache`` (keyed by usage id and by the
    ancestor blocks themselves) to avoid recomputing it per note.

    Raises:
        ItemNotFoundError - when appropriate module is not found.
    """
    # pylint: disable=too-many-statements
    store = modulestore()
    filtered_collection = list()
    cache = {}
    with store.bulk_operations(course.id):
        for model in collection:
            # Sanitize user-supplied fields and parse the timestamp.
            update = {
                u"text": sanitize_html(model["text"]),
                u"quote": sanitize_html(model["quote"]),
                u"updated": dateutil_parse(model["updated"]),
            }
            if "tags" in model:
                update[u"tags"] = [sanitize_html(tag) for tag in model["tags"]]
            model.update(update)
            usage_id = model["usage_id"]
            # Fast path: full ancestor context already computed for this id.
            if usage_id in cache:
                model.update(cache[usage_id])
                filtered_collection.append(model)
                continue
            usage_key = UsageKey.from_string(usage_id)
            # Add a course run if necessary.
            usage_key = usage_key.replace(course_key=store.fill_in_run(usage_key.course_key))
            try:
                item = store.get_item(usage_key)
            except ItemNotFoundError:
                log.debug("Module not found: %s", usage_key)
                continue
            if not has_access(user, "load", item, course_key=course.id):
                log.debug("User %s does not have an access to %s", user, item)
                continue
            unit = get_parent_unit(item)
            if unit is None:
                log.debug("Unit not found: %s", usage_key)
                continue
            section = unit.get_parent()
            if not section:
                log.debug("Section not found: %s", usage_key)
                continue
            # Section already seen: reuse its context, refresh the unit part.
            if section in cache:
                usage_context = cache[section]
                usage_context.update({
                    "unit": get_module_context(course, unit),
                })
                model.update(usage_context)
                cache[usage_id] = cache[unit] = usage_context
                filtered_collection.append(model)
                continue
            chapter = section.get_parent()
            if not chapter:
                log.debug("Chapter not found: %s", usage_key)
                continue
            # Chapter already seen: reuse it, refresh unit and section parts.
            if chapter in cache:
                usage_context = cache[chapter]
                usage_context.update({
                    "unit": get_module_context(course, unit),
                    "section": get_module_context(course, section),
                })
                model.update(usage_context)
                cache[usage_id] = cache[unit] = cache[section] = usage_context
                filtered_collection.append(model)
                continue
            # First note under this chapter: build the full context.
            usage_context = {
                "unit": get_module_context(course, unit),
                "section": get_module_context(course, section),
                "chapter": get_module_context(course, chapter),
            }
            model.update(usage_context)
            cache[usage_id] = cache[unit] = cache[section] = cache[chapter] = usage_context
            filtered_collection.append(model)
    return filtered_collection
def get_module_context(course, item):
    """
    Return a context dict for ``item``: its location and display_name, plus
    (depending on category) its index, courseware url, or serialized children.
    """
    item_dict = {
        'location': unicode(item.location),
        'display_name': item.display_name_with_default,
    }
    if item.category == 'chapter' and item.get_parent():
        # For a chapter the parent is the course module itself; use it so the
        # chapter's index is computed against the full child list.
        # course is a locator w/o branch and version
        # so for uniformity we replace it with one that has them
        course = item.get_parent()
        item_dict['index'] = get_index(item_dict['location'], course.children)
    elif item.category == 'vertical':
        section = item.get_parent()
        chapter = section.get_parent()
        # Position starts from 1, that's why we add 1.
        position = get_index(unicode(item.location), section.children) + 1
        item_dict['url'] = reverse('courseware_position', kwargs={
            'course_id': unicode(course.id),
            'chapter': chapter.url_name,
            'section': section.url_name,
            'position': position,
        })
    # Containers expose their children so callers can walk the tree.
    if item.category in ('chapter', 'sequential'):
        item_dict['children'] = [unicode(child) for child in item.children]
    return item_dict
def get_index(usage_key, children):
    """
    Return the position of the child whose serialized usage key equals
    ``usage_key``; raises ValueError when no child matches.
    """
    return [unicode(child) for child in children].index(usage_key)
def search(user, course, query_string):
    """
    Query the edx-notes service for `query_string(str)` and return the
    serialized results, with each row preprocessed for display.

    Raises EdxNotesParseError when the service response is not valid JSON
    or lacks the expected "rows" key.
    """
    response = send_request(user, course.id, "search", query_string)
    try:
        parsed = json.loads(response.content)
        raw_rows = parsed["rows"]
    except (ValueError, KeyError):
        log.warning("invalid JSON: %s", response.content)
        raise EdxNotesParseError(_("Server error. Please try again in a few minutes."))
    parsed["rows"] = preprocess_collection(user, course, raw_rows)
    return json.dumps(parsed, cls=NoteJSONEncoder)
def get_notes(user, course):
    """
    Fetch every note the user has made in the course.

    Returns a JSON string of the preprocessed collection, or None when the
    service response is unparseable or empty.
    """
    response = send_request(user, course.id, "annotations")
    try:
        collection = json.loads(response.content)
    except ValueError:
        collection = None
    if not collection:
        return None
    processed = preprocess_collection(user, course, collection)
    return json.dumps(processed, cls=NoteJSONEncoder)
def get_endpoint(api_url, path=""):
    """
    Build a normalized edx-notes-api endpoint URL.

    The base url is coerced to end with "/"; leading slashes on ``path`` are
    dropped and a trailing slash is appended before the parts are joined.

    Arguments:
        api_url (str): base url to the notes api
        path (str): path to the resource

    Returns:
        str: full endpoint to the notes api

    Raises:
        ImproperlyConfigured: when ``api_url`` is not a usable string.
    """
    try:
        endpoint = api_url if api_url.endswith("/") else api_url + "/"
        if path:
            resource = path.lstrip("/")
            if not resource.endswith("/"):
                resource += "/"
            endpoint += resource
        return endpoint
    except (AttributeError, KeyError):
        raise ImproperlyConfigured(_("No endpoint was provided for EdxNotes."))
def get_public_endpoint(path=""):
    """Get the full path to a resource on the public (browser-facing) notes API."""
    return get_endpoint(settings.EDXNOTES_PUBLIC_API, path)
def get_internal_endpoint(path=""):
    """Get the full path to a resource on the private (server-to-server) notes API."""
    return get_endpoint(settings.EDXNOTES_INTERNAL_API, path)
def get_course_position(course_module):
    """
    Return the user's current place in the course.
    If this is the user's first time, leads to COURSE/CHAPTER/SECTION.
    If this isn't the user's first time, leads to COURSE/CHAPTER.
    If there is no current position in the course or chapter, then selects
    the first child.

    Returns a dict with 'display_name' and 'url', or None when no chapter or
    section can be resolved.
    """
    urlargs = {'course_id': unicode(course_module.id)}
    chapter = get_current_child(course_module, min_depth=1)
    if chapter is None:
        log.debug("No chapter found when loading current position in course")
        return None
    urlargs['chapter'] = chapter.url_name
    # NOTE(review): a saved course position links to the chapter page, while
    # no position drills down to the section — confirm this matches the
    # "first time" wording in the docstring above.
    if course_module.position is not None:
        return {
            'display_name': chapter.display_name_with_default,
            'url': reverse('courseware_chapter', kwargs=urlargs),
        }
    # Relying on default of returning first child
    section = get_current_child(chapter, min_depth=1)
    if section is None:
        log.debug("No section found when loading current position in course")
        return None
    urlargs['section'] = section.url_name
    return {
        'display_name': section.display_name_with_default,
        'url': reverse('courseware_section', kwargs=urlargs)
    }
def generate_uid():
    """
    Generate a unique id: the 128-bit integer form of a random UUID4.
    """
    return uuid4().int  # pylint: disable=no-member
def is_feature_enabled(course):
    """
    Return True if the Student Notes feature is enabled for the course,
    False otherwise.

    In order for the feature to be enabled it must be:
        1) enabled globally via FEATURES.
        2) present in the course tab configuration.
        3) Harvard Annotation Tool must be disabled for the course.
    """
    # The previous implementation chained `and`, so callers could receive
    # None or a list instead of the documented boolean; normalize to bool.
    if not settings.FEATURES.get("ENABLE_EDXNOTES"):
        return False
    tab_found = any(tab["type"] == "edxnotes" for tab in course.tabs)
    return tab_found and not is_harvard_notes_enabled(course)
def is_harvard_notes_enabled(course):
    """
    Return True if the Harvard Annotation Tool is enabled for the course,
    False otherwise.

    The tool counts as enabled when any of 'textannotation',
    'imageannotation' or 'videoannotation' appears in the course's
    advanced modules.
    """
    annotation_types = {'textannotation', 'imageannotation', 'videoannotation'}
    return any(module in annotation_types for module in course.advanced_modules)
| agpl-3.0 |
B3AU/waveTree | examples/cluster/plot_ward_structured_vs_unstructured.py | 7 | 3079 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on their position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. On the opposite, when opposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import pylab as pl
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import Ward
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1000
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make the roll thinner along its second axis.
X[:, 1] *= .5
###############################################################################
# Compute clustering without connectivity constraints: Ward linkage driven
# by 3D positions only.
# NOTE(review): sklearn.cluster.Ward was removed in later scikit-learn
# releases; the modern equivalent is AgglomerativeClustering(linkage='ward').
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = Ward(n_clusters=6).fit(X)
label = ward.labels_
print("Elapsed time: ", time.time() - st)
print("Number of points: ", label.size)
###############################################################################
# Plot the unstructured clustering result, one color per cluster label.
fig = pl.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
    # `np.float` was a deprecated alias for the builtin `float` (removed in
    # NumPy 1.24); use `float`, consistent with the structured plot below.
    ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=pl.cm.jet(float(l) / np.max(label + 1)))
pl.title('Without connectivity constraints')
###############################################################################
# Define the structure A of the data: a 10 nearest neighbors graph, so merges
# are only allowed between samples that are neighbors on the manifold.
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10)
###############################################################################
# Compute clustering again, this time constrained by the connectivity graph.
print("Compute structured hierarchical clustering...")
st = time.time()
ward = Ward(n_clusters=6, connectivity=connectivity).fit(X)
label = ward.labels_
print("Elapsed time: ", time.time() - st)
print("Number of points: ", label.size)
###############################################################################
# Plot the structured result, one color per cluster label.
fig = pl.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
    ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=pl.cm.jet(float(l) / np.max(label + 1)))
pl.title('With connectivity constraints')
pl.show()
| bsd-3-clause |
mpeuster/estate | libestateredis/estate_redis.py | 1 | 3418 | """
libestate prototype using a central redis instance
(used for basic experiment in first paper)
"""
import redis
import time
import logging
logging.basicConfig(level=logging.INFO)
class estate(object):
    """Key/value state store backed by a central redis instance.

    Each NF instance writes its own replica of a key (``<key>.<instance_id>``)
    together with a logical timestamp, so global reads can either pick the
    newest replica or reduce over all replicas.
    """
    def __init__(self, instance_id, redis_host="127.0.0.1", redis_port=6379):
        # used to distinguish between NF instances
        self.instance_id = str(instance_id)
        # setup redis connection
        self.r = redis.StrictRedis(host=redis_host, port=redis_port, db=0)
        # NOTE(review): flushdb wipes the shared DB on every instance start —
        # presumably intentional for the experiments; confirm before reuse.
        self.r.flushdb()
        logging.info("ES-REDIS: Initialized estate for instance: %s" % self.instance_id)
    def _acquire_lock(self, lockname):
        # Busy-wait until SETNX succeeds, backing off 100ms per attempt.
        while not self.r.setnx(lockname, 1):
            logging.debug("ES: Wait for lock...")
            time.sleep(0.1)
        logging.debug("ES: Acquired: %s" % lockname)
    def _release_lock(self, lockname):
        self.r.delete(lockname)
        logging.debug("ES: Released: %s" % lockname)
    def _update_time(self, k):
        """
        Increment and return the global logical clock for key ``k``.

        Attention: Needs global value key!
        Simplification: Redis INCR should already be atomic so that no
        lock mechanism is needed. However, we keep it for easy debugging of
        locking times.
        """
        self._acquire_lock("lock.%s" % k)
        val = self.r.incr("globaltime.%s" % k)
        logging.debug("ES: Update time: %s is %d" % (k, val))
        self._release_lock("lock.%s" % k)
        return int(val)
    def to_instance_key(self, k):
        # Replica key layout: "<key>.<instance_id>".
        return "%s.%s" % (str(k), str(self.instance_id))
    def set(self, k, s):
        """Store value ``s`` in this instance's replica of ``k``, timestamped."""
        # fetch new timestamp for this update
        ts = self._update_time(k)
        kl = self.to_instance_key(k)
        logging.debug("ES: SET k=%s s=%s" % (str(kl), str(s)))
        # use pipelined command execution for consistency
        pipe = self.r.pipeline()
        pipe.set("timestamp.%s" % kl, ts)
        pipe.set(kl, s)
        return pipe.execute()[1]
    def get(self, k):
        """Return this instance's replica of ``k``, or "ES_NONE" when unset."""
        kl = self.to_instance_key(k)
        logging.debug("ES: GET k=%s" % (str(kl)))
        res = self.r.get(kl)
        return res if res is not None else "ES_NONE"
    def delete(self, k):
        """Delete this instance's replica of ``k`` (value and timestamp)."""
        kl = self.to_instance_key(k)
        logging.debug("ES: DEL k=%s" % (str(kl)))
        # use pipelined command execution for consistency
        pipe = self.r.pipeline()
        pipe.delete("timestamp.%s" % kl)
        pipe.delete(kl)
        return pipe.execute()[1]
    def _get_all_replicas(self, k):
        """
        Return a tuple of two parallel lists:
        1. state list
        2. timestamp list
        """
        # we use a single-wildcard symbol to get all replica values
        # NOTE(review): "?" matches exactly one character, so instance ids
        # longer than one character would be missed — confirm id format.
        keys = self.r.keys("%s.?" % k)
        states = []
        timestamps = []
        pipe = self.r.pipeline()
        for kl in keys:
            pipe.mget("timestamp.%s" % kl, kl)
        res = pipe.execute()
        for r in res:
            timestamps.append(r[0])
            states.append(r[1])
        return (states, timestamps)
    def _get_newest_replica(self, k):
        # Pick the state whose timestamp is maximal across replicas.
        states, timestamps = self._get_all_replicas(k)
        return states[timestamps.index(max(timestamps))]
    def get_global(self, k, red_func):
        """Global read: reduce all replicas with ``red_func``, or return the
        newest replica when no reduce function is given."""
        logging.debug("ES: GET_GLOBAL k=%s f=%s" % (str(k), str(red_func)))
        if red_func is not None:  # custom red function
            return red_func(self._get_all_replicas(k)[0])
        return self._get_newest_replica(k)  # return newest replica
| apache-2.0 |
ac0x/googletest | test/gtest_help_test.py | 2968 | 5856 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
# Platform detection: some flags only appear in --help on certain platforms.
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
# Malformed spellings of --gtest_list_tests ("-"/"/" prefix, dashes for
# underscores); these should still trigger the help text.
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
                           re.sub('^--', '/', LIST_TESTS_FLAG),
                           re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
# Probe the binary once: death-test flags are only advertised in --help
# when the build supports death tests.
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
    [PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
    FLAG_PREFIX + r'list_tests.*' +
    FLAG_PREFIX + r'filter=.*' +
    FLAG_PREFIX + r'also_run_disabled_tests.*' +
    FLAG_PREFIX + r'repeat=.*' +
    FLAG_PREFIX + r'shuffle.*' +
    FLAG_PREFIX + r'random_seed=.*' +
    FLAG_PREFIX + r'color=.*' +
    FLAG_PREFIX + r'print_time.*' +
    FLAG_PREFIX + r'output=.*' +
    FLAG_PREFIX + r'break_on_failure.*' +
    FLAG_PREFIX + r'throw_on_failure.*' +
    FLAG_PREFIX + r'catch_exceptions=0.*',
    re.DOTALL)
def RunWithFlag(flag):
  """Runs gtest_help_test_ with the given flag.

  Args:
    flag: the command-line flag to pass to gtest_help_test_, or None.

  Returns:
    the exit code and the text output as a tuple.
  """
  command = [PROGRAM_PATH] if flag is None else [PROGRAM_PATH, flag]
  child = gtest_test_utils.Subprocess(command)
  return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
  """Tests the --help flag and its equivalent forms."""
  def TestHelpFlag(self, flag):
    """Verifies correct behavior when help flag is specified.
    The right message must be printed and the tests must
    be skipped when the given flag is specified.
    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    self.assertEquals(0, exit_code)
    self.assert_(HELP_REGEX.search(output), output)
    # Streaming results is only supported (and advertised) on Linux.
    if IS_LINUX:
      self.assert_(STREAM_RESULT_TO_FLAG in output, output)
    else:
      self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
    # Death-test flags appear only when the build supports death tests.
    if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
      self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
    else:
      self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
  def TestNonHelpFlag(self, flag):
    """Verifies correct behavior when no help flag is specified.
    Verifies that when no help flag is specified, the tests are run
    and the help message is not printed.
    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    self.assert_(exit_code != 0)
    self.assert_(not HELP_REGEX.search(output), output)
  def testPrintsHelpWithFullFlag(self):
    self.TestHelpFlag('--help')
  def testPrintsHelpWithShortFlag(self):
    self.TestHelpFlag('-h')
  def testPrintsHelpWithQuestionFlag(self):
    self.TestHelpFlag('-?')
  def testPrintsHelpWithWindowsStyleQuestionFlag(self):
    self.TestHelpFlag('/?')
  def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
    self.TestHelpFlag(UNKNOWN_FLAG)
  def testPrintsHelpWithIncorrectFlagStyle(self):
    for incorrect_flag in INCORRECT_FLAG_VARIANTS:
      self.TestHelpFlag(incorrect_flag)
  def testRunsTestsWithoutHelpFlag(self):
    """Verifies that when no help flag is specified, the tests are run
    and the help message is not printed."""
    self.TestNonHelpFlag(None)
  def testRunsTestsWithGtestInternalFlag(self):
    """Verifies that the tests are run and no help message is printed when
    a flag starting with Google Test prefix and 'internal_' is supplied."""
    self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
ryancanhelpyou/servo | tests/wpt/css-tests/tools/wptserve/wptserve/pipes.py | 180 | 13830 | from cgi import escape
import gzip as gzip_module
import re
import time
import types
import uuid
from cStringIO import StringIO
def resolve_content(response):
    """Collapse a response's (possibly lazy) content into one byte string.

    Unicode payloads are encoded with the response's declared encoding.
    """
    merged = "".join(response.iter_content())
    if type(merged) == unicode:
        merged = merged.encode(response.encoding)
    return merged
class Pipeline(object):
    """A parsed pipe string: an ordered chain of response transformers.

    Calling the pipeline applies each pipe function in turn to the response
    and returns the final result.
    """
    # Registry of available pipe functions, populated by the @pipe decorator.
    pipes = {}
    def __init__(self, pipe_string):
        self.pipe_functions = self.parse(pipe_string)
    def parse(self, pipe_string):
        """Turn "name(arg,...)|name..." into a list of (func, args) pairs."""
        functions = []
        for item in PipeTokenizer().tokenize(pipe_string):
            if not item:
                break
            if item[0] == "function":
                functions.append((self.pipes[item[1]], []))
            elif item[0] == "argument":
                # Arguments always attach to the most recent function token.
                functions[-1][1].append(item[1])
        return functions
    def __call__(self, request, response):
        for func, args in self.pipe_functions:
            response = func(request, response, *args)
        return response
class PipeTokenizer(object):
    """Hand-rolled tokenizer for pipe strings like "status(404)|header(a,b)".

    ``tokenize`` yields ("function", name) and ("argument", value) tuples,
    finishing with None as an end-of-stream marker.
    """
    def __init__(self):
        #This whole class can likely be replaced by some regexps
        self.state = None
    def tokenize(self, string):
        """Yield token tuples for ``string``, ending with None."""
        self.string = string
        self.state = self.func_name_state
        self._index = 0
        while self.state:
            yield self.state()
        yield None
    def get_char(self):
        """Return the next input character, or None at end of input."""
        if self._index >= len(self.string):
            return None
        rv = self.string[self._index]
        self._index += 1
        return rv
    def func_name_state(self):
        # Accumulate a function name until "(", "|" or end of input.
        rv = ""
        while True:
            char = self.get_char()
            if char is None:
                self.state = None
                if rv:
                    return ("function", rv)
                else:
                    return None
            elif char == "(":
                self.state = self.argument_state
                return ("function", rv)
            elif char == "|":
                if rv:
                    return ("function", rv)
            else:
                rv += char
    def argument_state(self):
        # Accumulate an argument until "," (next argument) or ")" (end of
        # argument list); a backslash escapes the following character.
        rv = ""
        while True:
            char = self.get_char()
            if char is None:
                self.state = None
                return ("argument", rv)
            elif char == "\\":
                # Bug fix: get_escape() returns None when the input ends with
                # a lone backslash; the old code did ``rv += None`` (TypeError)
                # and its ``if rv is None`` check could never fire. Check the
                # escape result itself instead.
                escaped = self.get_escape()
                if escaped is None:
                    #This should perhaps be an error instead
                    return ("argument", rv)
                rv += escaped
            elif char == ",":
                return ("argument", rv)
            elif char == ")":
                self.state = self.func_name_state
                return ("argument", rv)
            else:
                rv += char
    def get_escape(self):
        """Map an escaped character to its value ("n", "r", "t" are special)."""
        char = self.get_char()
        escapes = {"n": "\n",
                   "r": "\r",
                   "t": "\t"}
        return escapes.get(char, char)
class pipe(object):
    """Decorator registering a function as a pipe in Pipeline.pipes.

    Positional arguments are converter callables applied to the string
    arguments parsed from the pipe string; converters wrapped in ``opt``
    mark optional trailing arguments.
    """
    def __init__(self, *arg_converters):
        self.arg_converters = arg_converters
        self.max_args = len(self.arg_converters)
        self.min_args = 0
        opt_seen = False
        for item in self.arg_converters:
            if not opt_seen:
                if isinstance(item, opt):
                    opt_seen = True
                else:
                    self.min_args += 1
            else:
                if not isinstance(item, opt):
                    raise ValueError("Non-optional argument cannot follow optional argument")
    def __call__(self, f):
        def inner(request, response, *args):
            if not (self.min_args <= len(args) <= self.max_args):
                raise ValueError("Expected between %d and %d args, got %d" %
                                 (self.min_args, self.max_args, len(args)))
            # The genexp loop variable shadows ``f`` only inside the
            # generator's own scope; the outer function ``f`` is unaffected.
            arg_values = tuple(f(x) for f, x in zip(self.arg_converters, args))
            return f(request, response, *arg_values)
        Pipeline.pipes[f.__name__] = inner
        #We actually want the undecorated function in the main namespace
        return f
class opt(object):
    """Wrapper marking a pipe argument converter as optional."""
    def __init__(self, f):
        self.f = f
    def __call__(self, arg):
        return self.f(arg)
def nullable(func):
    """Wrap a converter so the literal string "null" (any case) maps to None."""
    def convert(arg):
        return None if arg.lower() == "null" else func(arg)
    return convert
def boolean(arg):
    """Convert "true"/"1" to True and "false"/"0" to False (case-insensitive);
    any other value raises ValueError."""
    mapping = {"true": True, "1": True, "false": False, "0": False}
    try:
        return mapping[arg.lower()]
    except KeyError:
        raise ValueError
@pipe(int)
def status(request, response, code):
    """Alter the status code.
    :param code: Status code to use for the response.
    :returns: the modified response"""
    response.status = code
    return response
@pipe(str, str, opt(boolean))
def header(request, response, name, value, append=False):
    """Set a HTTP header.
    Replaces any existing HTTP header of the same name unless
    append is set, in which case the header is appended without
    replacement.
    :param name: Name of the header to set.
    :param value: Value to use for the header.
    :param append: True if existing headers should not be replaced
    :returns: the modified response
    """
    if not append:
        response.headers.set(name, value)
    else:
        response.headers.append(name, value)
    return response
@pipe(str)
def trickle(request, response, delays):
    """Send the response in parts, with time delays.
    :param delays: A string of delays and amounts, in bytes, of the
                   response to send. Each component is separated by
                   a colon. Amounts in bytes are plain integers, whilst
                   delays are floats prefixed with a single d e.g.
                   d1:100:d2
                   Would cause a 1 second delay, would then send 100 bytes
                   of the file, and then cause a 2 second delay, before sending
                   the remainder of the file.
                   If the last token is of the form rN, instead of sending the
                   remainder of the file, the previous N instructions will be
                   repeated until the whole file has been sent e.g.
                   d1:100:d2:r2
                   Causes a delay of 1s, then 100 bytes to be sent, then a 2s delay
                   and then a further 100 bytes followed by a two second delay
                   until the response has been fully sent.
    """
    def parse_delays():
        # Turn the spec string into ("delay"|"repeat"|"bytes", value)
        # instructions, merging consecutive instructions of the same type.
        parts = delays.split(":")
        rv = []
        for item in parts:
            if item.startswith("d"):
                item_type = "delay"
                value = float(item[1:])
            elif item.startswith("r"):
                item_type = "repeat"
                value = int(item[1:])
                # A repeat must replay delay/bytes pairs, hence an even count.
                if not value % 2 == 0:
                    raise ValueError
            else:
                item_type = "bytes"
                value = int(item)
            if len(rv) and rv[-1][0] == item_type:
                # BUG FIX: the previous code did ``rv[-1][1] += value`` on a
                # tuple, which raises TypeError whenever two consecutive spec
                # items share a type (e.g. "d1:d1"); rebuild the tuple.
                rv[-1] = (item_type, rv[-1][1] + value)
            else:
                rv.append((item_type, value))
        return rv
    delays = parse_delays()
    if not delays:
        return response
    content = resolve_content(response)
    modified_content = []
    # Mutable cell so the nested function can advance the read position.
    offset = [0]
    def sleep(seconds):
        # Delays are modelled as callables that sleep and emit no content.
        def inner():
            time.sleep(seconds)
            return ""
        return inner
    def add_content(delays, repeat=False):
        for i, (item_type, value) in enumerate(delays):
            if item_type == "bytes":
                modified_content.append(content[offset[0]:offset[0] + value])
                offset[0] += value
            elif item_type == "delay":
                modified_content.append(sleep(value))
            elif item_type == "repeat":
                # "repeat" is only valid as the final instruction: replay the
                # preceding ``value`` instructions until content is exhausted.
                assert i == len(delays) - 1
                while offset[0] < len(content):
                    add_content(delays[-(value + 1):-1], True)
        if not repeat and offset[0] < len(content):
            modified_content.append(content[offset[0]:])
    add_content(delays)
    response.content = modified_content
    return response
@pipe(nullable(int), opt(nullable(int)))
def slice(request, response, start, end=None):
    """Send a byte range of the response body
    :param start: The starting offset. Follows python semantics including
                  negative numbers.
    :param end: The ending offset, again with python semantics and None
                (spelled "null" in a query string) to indicate the end of
                the file.
    """
    # NOTE: deliberately shadows the builtin `slice`; the public name must
    # stay "slice" because pipe names are taken from the query string.
    content = resolve_content(response)
    response.content = content[start:end]
    return response
class ReplacementTokenizer(object):
    """Tokenize the inside of a {{...}} substitution into
    ("var"|"ident"|"index", value) tuples via re.Scanner."""
    # NOTE: these are re.Scanner callbacks, so the first parameter is the
    # scanner object rather than a conventional ``self``.
    def ident(scanner, token):
        return ("ident", token)
    def index(scanner, token):
        # Strip the surrounding brackets; numeric indices become ints.
        token = token[1:-1]
        try:
            token = int(token)
        except ValueError:
            token = unicode(token, "utf8")
        return ("index", token)
    def var(scanner, token):
        # Strip the trailing ":" from "$name:".
        token = token[:-1]
        return ("var", token)
    def tokenize(self, string):
        return self.scanner.scan(string)[0]
    scanner = re.Scanner([(r"\$\w+:", var),
                          (r"\$?\w+(?:\(\))?", ident),
                          (r"\[[^\]]*\]", index)])
class FirstWrapper(object):
    """Mapping adapter: subscription returns the first value stored for a key
    (via the wrapped object's ``first``), or "" when the key is absent."""
    def __init__(self, params):
        self.params = params
    def __getitem__(self, key):
        try:
            return self.params.first(key)
        except KeyError:
            return ""
@pipe()
def sub(request, response):
    """Substitute environment information about the server and request into the script.
    The format is a very limited template language. Substitutions are
    enclosed by {{ and }}. There are several available substitutions:
    host
      A simple string value and represents the primary host from which the
      tests are being run.
    domains
      A dictionary of available domains indexed by subdomain name.
    ports
      A dictionary of lists of ports indexed by protocol.
    location
      A dictionary of parts of the request URL. Valid keys are
      'server, 'scheme', 'host', 'hostname', 'port', 'path' and 'query'.
      'server' is scheme://host:port, 'host' is hostname:port, and query
      includes the leading '?', but other delimiters are omitted.
    headers
      A dictionary of HTTP headers in the request.
    GET
      A dictionary of query parameters supplied with the request.
    uuid()
      A pseudo-random UUID suitable for usage with stash
    So for example in a setup running on localhost with a www
    subdomain and a http server on ports 80 and 81::
      {{host}} => localhost
      {{domains[www]}} => www.localhost
      {{ports[http][1]}} => 81
    It is also possible to assign a value to a variable name, which must start with
    the $ character, using the ":" syntax e.g.
    {{$id:uuid()}}
    Later substitutions in the same file may then refer to the variable
    by name e.g.
    {{$id}}
    """
    content = resolve_content(response)
    new_content = template(request, content)
    response.content = new_content
    return response
def template(request, content):
    """Expand every {{...}} substitution in ``content``; see ``sub`` for the
    supported template syntax."""
    #TODO: There basically isn't any error handling here
    tokenizer = ReplacementTokenizer()
    # Values assigned with the {{$name:...}} syntax, shared across matches.
    variables = {}
    def config_replacement(match):
        content, = match.groups()
        tokens = tokenizer.tokenize(content)
        if tokens[0][0] == "var":
            variable = tokens[0][1]
            tokens = tokens[1:]
        else:
            variable = None
        assert tokens[0][0] == "ident" and all(item[0] == "index" for item in tokens[1:]), tokens
        field = tokens[0][1]
        # Resolution order: previously-assigned variables, then the built-in
        # fields, then the server configuration.
        if field in variables:
            value = variables[field]
        elif field == "headers":
            value = request.headers
        elif field == "GET":
            value = FirstWrapper(request.GET)
        elif field in request.server.config:
            value = request.server.config[tokens[0][1]]
        elif field == "location":
            value = {"server": "%s://%s:%s" % (request.url_parts.scheme,
                                               request.url_parts.hostname,
                                               request.url_parts.port),
                     "scheme": request.url_parts.scheme,
                     "host": "%s:%s" % (request.url_parts.hostname,
                                        request.url_parts.port),
                     "hostname": request.url_parts.hostname,
                     "port": request.url_parts.port,
                     "path": request.url_parts.path,
                     "query": "?%s" % request.url_parts.query}
        elif field == "uuid()":
            value = str(uuid.uuid4())
        else:
            raise Exception("Undefined template variable %s" % field)
        # Apply any [..] index tokens to drill into nested containers.
        for item in tokens[1:]:
            value = value[item[1]]
        assert isinstance(value, (int,) + types.StringTypes), tokens
        if variable is not None:
            variables[variable] = value
        #Should possibly support escaping for other contexts e.g. script
        #TODO: read the encoding of the response
        return escape(unicode(value)).encode("utf-8")
    template_regexp = re.compile(r"{{([^}]*)}}")
    new_content, count = template_regexp.subn(config_replacement, content)
    return new_content
@pipe()
def gzip(request, response):
    """This pipe gzip-encodes response data.
    It sets (or overwrites) these HTTP headers:
    Content-Encoding is set to gzip
    Content-Length is set to the length of the compressed content
    """
    content = resolve_content(response)
    response.headers.set("Content-Encoding", "gzip")
    # Compress into an in-memory buffer; gzip is imported as gzip_module
    # because this pipe deliberately takes the name "gzip".
    out = StringIO()
    with gzip_module.GzipFile(fileobj=out, mode="w") as f:
        f.write(content)
    response.content = out.getvalue()
    response.headers.set("Content-Length", len(response.content))
    return response
| mpl-2.0 |
mikehulluk/morphforge | src/morphforge/core/mgrs/__init__.py | 1 | 1791 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge.core.mgrs.locmgr import LocMgr
from morphforge.core.mgrs.logmgr import LogMgr
from morphforge.core.mgrs.settingsmgr import SettingsMgr
from morphforge.core.mgrs.rcmgr import RCMgr
__all__ = ['LocMgr', 'LogMgr', 'SettingsMgr', 'RCMgr']
| bsd-2-clause |
rowhit/h2o-2 | py/testdir_single_jvm/test_GLM2_poisson_fail.py | 9 | 1571 | import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i
print "Just do a failing case"
def define_params():
    """Return the fixed GLM2 parameter set for the failing poisson case."""
    return {
        'destination_key': None,
        'standardize': None,
        'family': 'poisson',
        'beta_epsilon': None,
        'max_iter': None,
        'higher_accuracy': None,
        'tweedie_variance_power': None,
        'lambda_search': 1,
        'ignored_cols': 0,
        'source': u'covtype.20k.hex',
        'n_folds': 1,
        'alpha': 0.8,
        'use_all_factor_levels': None,
        'response': 54,
        'lambda': 0,
    }
class Basic(unittest.TestCase):
    """Regression test: a GLM2 poisson run on covtype that previously failed."""
    def tearDown(self):
        # Fail the test if any h2o node logged errors during the run.
        h2o.check_sandbox_for_errors()
    @classmethod
    def setUpClass(cls):
        global SEED
        SEED = h2o.setup_random_seed()
        h2o.init()
    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()
    def test_GLM2_poisson_fail(self):
        """Parse the dataset once, then run the same GLM three times."""
        csvPathname = 'covtype/covtype.20k.data'
        hex_key = 'covtype.20k.hex'
        parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=hex_key, schema='put')
        params = define_params()
        for trial in range(3):
            # Fresh copy per trial — presumably so downstream kwargs
            # mutations don't leak between trials; confirm.
            kwargs = params.copy()
            start = time.time()
            glm = h2o_cmd.runGLM(timeoutSecs=180, parseResult=parseResult, **kwargs)
            h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
            h2o.check_sandbox_for_errors()
            print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
            print "Trial #", trial, "completed\n"
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
puttarajubr/commcare-hq | custom/succeed/reports/patient_submissions.py | 2 | 3518 | from django.core.urlresolvers import reverse
from sqlagg.columns import SimpleColumn
from sqlagg.filters import EQ
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.sqlreport import SqlData, DatabaseColumn
from corehq.apps.reports.standard import CustomProjectReport, ProjectReportParametersMixin
from corehq.apps.userreports.sql import get_table_name
from custom.succeed.reports.patient_details import PatientDetailsReport
from django.utils import html
class PatientSubmissionData(SqlData):
    """SQL data source: form submissions belonging to one patient case.

    Expects ``domain`` and ``case_id`` keys in ``self.config``.
    """
    slug = 'succeed_submissions'
    @property
    def table_name(self):
        # UCR table name derived from the domain and this source's slug.
        return get_table_name(self.config['domain'], self.slug)
    @property
    def columns(self):
        return [
            DatabaseColumn('Doc Id', SimpleColumn('doc_id')),
            DatabaseColumn('Form name', SimpleColumn('form_name')),
            DatabaseColumn('Submitted By', SimpleColumn('username')),
            DatabaseColumn('Completed', SimpleColumn('date')),
        ]
    @property
    def filters(self):
        # Restrict rows to the patient case supplied via config.
        return [EQ('case_id', 'case_id')]
    @property
    def group_by(self):
        return ['doc_id', 'form_name', 'username', 'date']
class PatientSubmissionReport(GenericTabularReport, CustomProjectReport, ProjectReportParametersMixin):
    """Tabular report listing every form submission for a single patient.

    The patient case is identified by the ``patient_id`` GET parameter.
    """
    slug = "patient_submissions"
    name = 'Patient Submissions'
    use_datatables = True
    hide_filters = True

    @classmethod
    def show_in_navigation(cls, domain=None, project=None, user=None):
        """Only show the report in a domain/project context with no user filter."""
        return bool(domain and project and user is None)

    @property
    def report_config(self):
        """Config dict consumed by the PatientSubmissionData source."""
        return {
            'domain': self.domain,
            'case_id': self.request.GET.get('patient_id'),
        }

    @property
    def model(self):
        return PatientSubmissionData(config=self.report_config)

    @property
    def fields(self):
        # No user-configurable filter fields; patient_id comes from the URL.
        return []

    @property
    def headers(self):
        return DataTablesHeader(
            DataTablesColumn("Form Name", prop_name='@name'),
            DataTablesColumn("Submitted By", prop_name='form.meta.username'),
            DataTablesColumn("Completed", prop_name='received_on')
        )

    @property
    def rows(self):
        """Yield one row per submission: linked form name, user, date."""
        # `dict.has_key()` is deprecated and removed in Python 3; use `in`.
        if 'patient_id' in self.request.GET:
            def _format_row(row_field_dict):
                return [
                    self.submit_history_form_link(row_field_dict["doc_id"],
                                                  row_field_dict['form_name']),
                    row_field_dict['username'],
                    row_field_dict['date']
                ]
            for row in self.model.get_data():
                yield _format_row(row)

    def submit_history_form_link(self, form_id, form_name):
        """Return a safe anchor that opens the form's render view in a dialog."""
        url = reverse('render_form_data', args=[self.domain, form_id])
        # Bug fix: the original markup ran href and target together
        # ("href='%s'target=..."), producing a malformed attribute.
        return html.mark_safe("<a class='ajax_dialog' href='%s' "
                              "target='_blank'>%s</a>" % (url, html.escape(form_name)))

    @property
    def report_context(self):
        """Merge this report's context into the patient-details context."""
        ret = super(PatientSubmissionReport, self).report_context
        ret['view_mode'] = 'submissions'
        tabular_context = PatientDetailsReport(self.request).report_context
        tabular_context.update(ret)
        self.report_template_path = "patient_submissions.html"
        tabular_context['patient_id'] = self.request_params['patient_id']
        return tabular_context
| bsd-3-clause |
yangjae/grpc | src/python/src/grpc/framework/foundation/logging_pool.py | 39 | 3052 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A thread pool that logs exceptions raised by tasks executed within it."""
import functools
import logging
from concurrent import futures
def _wrap(behavior):
"""Wraps an arbitrary callable behavior in exception-logging."""
@functools.wraps(behavior)
def _wrapping(*args, **kwargs):
try:
return behavior(*args, **kwargs)
except Exception as e:
logging.exception('Unexpected exception from task run in logging pool!')
raise
return _wrapping
class _LoggingPool(object):
"""An exception-logging futures.ThreadPoolExecutor-compatible thread pool."""
def __init__(self, backing_pool):
self._backing_pool = backing_pool
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._backing_pool.shutdown(wait=True)
def submit(self, fn, *args, **kwargs):
return self._backing_pool.submit(_wrap(fn), *args, **kwargs)
def map(self, func, *iterables, **kwargs):
return self._backing_pool.map(
_wrap(func), *iterables, timeout=kwargs.get('timeout', None))
def shutdown(self, wait=True):
self._backing_pool.shutdown(wait=wait)
def pool(max_workers):
  """Creates a thread pool that logs exceptions raised by the tasks within it.

  Args:
    max_workers: The maximum number of worker threads to allow the pool.

  Returns:
    A futures.ThreadPoolExecutor-compatible thread pool that logs exceptions
      raised by the tasks executed within it.
  """
  executor = futures.ThreadPoolExecutor(max_workers)
  return _LoggingPool(executor)
| bsd-3-clause |
synicalsyntax/zulip | zerver/views/realm_domains.py | 3 | 2632 | from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.decorator import require_realm_admin
from zerver.lib.actions import do_add_realm_domain, do_change_realm_domain, do_remove_realm_domain
from zerver.lib.domains import validate_domain
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.lib.validator import check_bool, check_string
from zerver.models import RealmDomain, UserProfile, get_realm_domains
def list_realm_domains(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
    """Return every allowed email domain configured for the user's realm."""
    realm_domains = get_realm_domains(user_profile.realm)
    return json_success({'domains': realm_domains})
@require_realm_admin
@has_request_variables
def create_realm_domain(request: HttpRequest, user_profile: UserProfile,
                        domain: str=REQ(validator=check_string),
                        allow_subdomains: bool=REQ(validator=check_bool)) -> HttpResponse:
    """Add an allowed email domain to the realm (realm admins only)."""
    realm = user_profile.realm
    # Normalize before validating so duplicate checks are case-insensitive.
    domain = domain.strip().lower()
    try:
        validate_domain(domain)
    except ValidationError as e:
        return json_error(_('Invalid domain: {}').format(e.messages[0]))
    already_present = RealmDomain.objects.filter(realm=realm, domain=domain).exists()
    if already_present:
        return json_error(_("The domain {domain} is already"
                            " a part of your organization.").format(domain=domain))
    realm_domain = do_add_realm_domain(realm, domain, allow_subdomains)
    return json_success({'new_domain': [realm_domain.id, realm_domain.domain]})
@require_realm_admin
@has_request_variables
def patch_realm_domain(request: HttpRequest, user_profile: UserProfile, domain: str,
                       allow_subdomains: bool=REQ(validator=check_bool)) -> HttpResponse:
    """Toggle the allow_subdomains flag on an existing realm domain (admins only)."""
    try:
        # Lookup and update stay inside a single try so a missing entry yields
        # the same error response regardless of which step detects it.
        entry = RealmDomain.objects.get(realm=user_profile.realm, domain=domain)
        do_change_realm_domain(entry, allow_subdomains)
    except RealmDomain.DoesNotExist:
        return json_error(_('No entry found for domain {domain}.').format(domain=domain))
    return json_success()
@require_realm_admin
@has_request_variables
def delete_realm_domain(request: HttpRequest, user_profile: UserProfile,
                        domain: str) -> HttpResponse:
    """Remove an allowed email domain from the realm (realm admins only)."""
    try:
        # Lookup and removal share one try block, mirroring the lookup error
        # handling used by the other realm-domain views.
        entry = RealmDomain.objects.get(realm=user_profile.realm, domain=domain)
        do_remove_realm_domain(entry)
    except RealmDomain.DoesNotExist:
        return json_error(_('No entry found for domain {domain}.').format(domain=domain))
    return json_success()
| apache-2.0 |
janocat/odoo | addons/sale_crm/sale_crm.py | 320 | 1429 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class sale_order(osv.osv):
    """Extend sale.order with CRM tag support and the crm.tracking mixin."""
    _name = "sale.order"
    # Mixing in crm.tracking.mixin presumably adds campaign-tracking fields to
    # orders -- confirm against the mixin's definition.
    _inherit = ['sale.order', 'crm.tracking.mixin']
    _columns = {
        # Many2many CRM tags, limited to categories of the order's sales
        # section (or section-less ones) that are registered for crm.lead.
        'categ_ids': fields.many2many('crm.case.categ', 'sale_order_category_rel', 'order_id', 'category_id', 'Tags', \
            domain="['|', ('section_id', '=', section_id), ('section_id', '=', False), ('object_id.model', '=', 'crm.lead')]", context="{'object_name': 'crm.lead'}")
    }
| agpl-3.0 |
4Catalyzer/flask-annex | setup.py | 1 | 2066 | import subprocess
from setuptools import Command, setup
# -----------------------------------------------------------------------------
def system(command):
    """Return a setuptools Command class that runs *command* in a shell."""

    def _noop(self):
        pass

    class SystemCommand(Command):
        user_options = []
        # Command requires these hooks; neither takes any options here.
        initialize_options = _noop
        finalize_options = _noop

        def run(self):
            # shell=True so composite commands ('a && b', globs) work as written.
            subprocess.check_call(command, shell=True)

    return SystemCommand
# -----------------------------------------------------------------------------
# Distribution metadata plus shell-backed release helper commands (see the
# system() factory above for how cmdclass entries run).
setup(
    name="Flask-Annex",
    version="0.5.0",
    description="Efficient integration of external storage services for Flask",
    url="https://github.com/4Catalyzer/flask-annex",
    author="Jimmy Jia",
    author_email="tesrin@gmail.com",
    license="MIT",
    python_requires=">=3.6",
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Framework :: Flask",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3 :: Only",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    keywords="storage s3 flask",
    packages=("flask_annex",),
    install_requires=("Flask >= 0.10",),
    # Optional extras: S3 backend support and the test tooling for each mode.
    extras_require={
        "s3": ("boto3 >= 1.4.0",),
        "tests": ("pytest", "pytest-cov"),
        "tests-s3": ("moto", "requests"),
    },
    cmdclass={
        "clean": system("rm -rf build dist *.egg-info"),
        "package": system("python setup.py sdist bdist_wheel"),
        "publish": system("twine upload dist/*"),
        "release": system("python setup.py clean package publish"),
        "test": system("tox"),
    },
)
| mit |
targos/VisuMol | lib/components/highlight.js/docs/conf.py | 7 | 7771 | # -*- coding: utf-8 -*-
#
# highlight.js documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 12 23:48:27 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'highlight.js'
copyright = u'2012, Ivan Sagalaev'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '7.2'
# The full version, including alpha/beta/rc tags.
release = '7.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'highlightjsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'highlightjs.tex', u'highlight.js Documentation',
u'Ivan Sagalaev', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'highlightjs', u'highlight.js Documentation',
[u'Ivan Sagalaev'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'highlightjs', u'highlight.js Documentation',
u'Ivan Sagalaev', 'highlightjs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit |
kirbyherm/root-r-tools | geom/gdml/ROOTwriter.py | 85 | 21968 | # @(#)root/gdml:$Id$
# Author: Witold Pokorski 05/06/2006
from math import *
import libPyROOT
import ROOT
import math
import re
# This class provides ROOT binding for the 'writer' class. It implements specific
# methods for all the supported TGeo classes which call the appropriate 'add-element'
# methods from the 'writer' class.
# The list of presently supported classes is the following:
# Materials:
# TGeoElement
# TGeoMaterial
# GeoMixture
# Solids:
# TGeoBBox
# TGeoArb8
# TGeoTubeSeg
# TGeoConeSeg
# TGeoCtub
# TGeoPcon
# TGeoTrap
# TGeoGtra
# TGeoTrd2
# TGeoSphere
# TGeoPara
# TGeoTorus
# TGeoHype
# TGeoPgon
# TGeoXtru
# TGeoEltu
# TGeoParaboloid
# TGeoCompositeShape (subtraction, union, intersection)
# Geometry:
# TGeoVolume
# In addition the class contains three methods 'dumpMaterials', 'dumpSolids' and 'examineVol'
# which retrieve from memory the materials, the solids and the geometry tree
# respectively. The user should instantiate this class, passing an instance of the 'writer'
# class as argument. In order to export the geometry in the form of a GDML file,
# the three methods (dumpMaterials, dumpSolids and examineVol) should be called.
# The argument of 'dumpMaterials' method should be the list of materials,
# the argument of the 'dumpSolids' method should be the list of solids and
# the argument of the 'examineVol' method should be the top volume of
# the geometry tree.
# For any question or remarks concerning this code, please send an email to
# Witold.Pokorski@cern.ch.
class ROOTwriter(object):
def __init__(self, writer):
self.writer = writer
self.elements = []
self.volumeCount = 0
self.nodeCount = 0
self.shapesCount = 0
self.bvols = []
self.vols = []
self.volsUseCount = {}
self.sortedVols = []
self.nodes = []
self.bnodes = []
self.solList = []
self.geomgr = ROOT.gGeoManager
self.geomgr.SetAllIndex()
pass
def genName(self, name):
re.sub('$', '', name)
return name
    def rotXYZ(self, r):
        """Extract three rotation angles in degrees from a 3x3 rotation
        matrix given as a row-major 9-element sequence.

        NOTE(review): the decomposition convention (which axis order the
        angles correspond to) is not stated anywhere in this file -- confirm
        against the GDML rotation convention expected by the writer.
        """
        rad = 180/acos(-1)
        # Magnitude of cos(b); near zero means the matrix is close to gimbal
        # lock and the standard extraction becomes numerically unstable.
        cosb = math.sqrt( r[0]*r[0] + r[1]*r[1] )
        if cosb > 0.00001 : #I didn't find a proper constant to use here, so I just put a value that works with all the examples on a linux machine (P4)
            a = atan2( r[5], r[8] ) * rad
            b = atan2( -r[2], cosb ) * rad
            c = atan2( r[1], r[0] ) * rad
        else:
            # Degenerate case: fold the coupled rotations into 'a', fix c = 0.
            a = atan2( -r[7], r[4] ) * rad
            b = atan2( -r[2], cosb ) * rad
            c = 0.
        return (a, b, c)
def TGeoBBox(self, solid):
self.writer.addBox(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), 2*solid.GetDX(), 2*solid.GetDY(), 2*solid.GetDZ())
def TGeoParaboloid(self, solid):
self.writer.addParaboloid(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), solid.GetRlo(), solid.GetRhi(), solid.GetDz())
def TGeoSphere(self, solid):
self.writer.addSphere(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), solid.GetRmin(), solid.GetRmax(),
solid.GetPhi1(), solid.GetPhi2() - solid.GetPhi1(),
solid.GetTheta1(), solid.GetTheta2() - solid.GetTheta1())
def TGeoArb8(self, solid):
self.writer.addArb8(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]),
solid.GetVertices()[0],
solid.GetVertices()[1],
solid.GetVertices()[2],
solid.GetVertices()[3],
solid.GetVertices()[4],
solid.GetVertices()[5],
solid.GetVertices()[6],
solid.GetVertices()[7],
solid.GetVertices()[8],
solid.GetVertices()[9],
solid.GetVertices()[10],
solid.GetVertices()[11],
solid.GetVertices()[12],
solid.GetVertices()[13],
solid.GetVertices()[14],
solid.GetVertices()[15],
solid.GetDz())
def TGeoConeSeg(self, solid):
self.writer.addCone(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), 2*solid.GetDz(), solid.GetRmin1(), solid.GetRmin2(),
solid.GetRmax1(), solid.GetRmax2(), solid.GetPhi1(), solid.GetPhi2() - solid.GetPhi1())
def TGeoCone(self, solid):
self.writer.addCone(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), 2*solid.GetDz(), solid.GetRmin1(), solid.GetRmin2(),
solid.GetRmax1(), solid.GetRmax2(), 0, 360)
def TGeoPara(self, solid):
self.writer.addPara(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), solid.GetX(), solid.GetY(), solid.GetZ(),
solid.GetAlpha(), solid.GetTheta(), solid.GetPhi())
def TGeoTrap(self, solid):
self.writer.addTrap(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), 2*solid.GetDz(), solid.GetTheta(), solid.GetPhi(),
2*solid.GetH1(), 2*solid.GetBl1(), 2*solid.GetTl1(), solid.GetAlpha1(),
2*solid.GetH2(), 2*solid.GetBl2(), 2*solid.GetTl2(), solid.GetAlpha2())
def TGeoGtra(self, solid):
self.writer.addTwistedTrap(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), 2*solid.GetDz(), solid.GetTheta(), solid.GetPhi(),
2*solid.GetH1(), 2*solid.GetBl1(), 2*solid.GetTl1(), solid.GetAlpha1(),
2*solid.GetH2(), 2*solid.GetBl2(), 2*solid.GetTl2(), solid.GetAlpha2(), solid.GetTwistAngle())
def TGeoTrd1(self, solid):
self.writer.addTrd(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), 2*solid.GetDx1(), 2*solid.GetDx2(), 2*solid.GetDy(),
2*solid.GetDy(), 2*solid.GetDz())
def TGeoTrd2(self, solid):
self.writer.addTrd(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), 2*solid.GetDx1(), 2*solid.GetDx2(), 2*solid.GetDy1(),
2*solid.GetDy2(), 2*solid.GetDz())
def TGeoTubeSeg(self, solid):
self.writer.addTube(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), solid.GetRmin(), solid.GetRmax(),
2*solid.GetDz(), solid.GetPhi1(), solid.GetPhi2()-solid.GetPhi1())
def TGeoCtub(self, solid):
self.writer.addCutTube(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), solid.GetRmin(), solid.GetRmax(),
2*solid.GetDz(), solid.GetPhi1(), solid.GetPhi2()-solid.GetPhi1(),
solid.GetNlow()[0],
solid.GetNlow()[1],
solid.GetNlow()[2],
solid.GetNhigh()[0],
solid.GetNhigh()[1],
solid.GetNhigh()[2])
def TGeoTube(self, solid):
self.writer.addTube(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), solid.GetRmin(), solid.GetRmax(),
2*solid.GetDz(), 0, 360)
    def TGeoPcon(self, solid):
        """Export a polycone: one (z, rmin, rmax) tuple per TGeo z-section."""
        zplanes = []
        for i in range(solid.GetNz()):
            zplanes.append( (solid.GetZ(i), solid.GetRmin(i), solid.GetRmax(i)) )
        self.writer.addPolycone(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), solid.GetPhi1(), solid.GetDphi(), zplanes)
def TGeoTorus(self, solid):
self.writer.addTorus(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), solid.GetR(), solid.GetRmin(), solid.GetRmax(),
solid.GetPhi1(), solid.GetDphi())
def TGeoPgon(self, solid):
zplanes = []
for i in range(solid.GetNz()):
zplanes.append( (solid.GetZ(i), solid.GetRmin(i), solid.GetRmax(i)) )
self.writer.addPolyhedra(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), solid.GetPhi1(), solid.GetDphi(),
solid.GetNedges(), zplanes)
    def TGeoXtru(self, solid):
        """Export an extruded polygon: the 2-D outline vertices plus one
        (index, z, x-offset, y-offset, scale) tuple per z-section."""
        vertices = []
        sections = []
        for i in range(solid.GetNvert()):
            vertices.append( (solid.GetX(i), solid.GetY(i)) )
        for i in range(solid.GetNz()):
            sections.append( (i, solid.GetZ(i), solid.GetXOffset(i), solid.GetYOffset(i), solid.GetScale(i)) )
        self.writer.addXtrusion(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), vertices, sections)
def TGeoEltu(self, solid):
self.writer.addEltube(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), solid.GetA(), solid.GetB(), solid.GetDz())
def TGeoHype(self, solid):
self.writer.addHype(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]), solid.GetRmin(), solid.GetRmax(),
solid.GetStIn(), solid.GetStOut(), 2*solid.GetDz())
def TGeoUnion(self, solid):
lrot = self.rotXYZ(solid.GetBoolNode().GetLeftMatrix().Inverse().GetRotationMatrix())
rrot = self.rotXYZ(solid.GetBoolNode().GetRightMatrix().Inverse().GetRotationMatrix())
if ([solid.GetBoolNode().GetLeftShape(), 0]) in self.solList:
self.solList[self.solList.index([solid.GetBoolNode().GetLeftShape(), 0])][1] = 1
eval('self.'+solid.GetBoolNode().GetLeftShape().__class__.__name__)(solid.GetBoolNode().GetLeftShape())
self.shapesCount = self.shapesCount + 1
if ([solid.GetBoolNode().GetRightShape(), 0]) in self.solList:
self.solList[self.solList.index([solid.GetBoolNode().GetRightShape(), 0])][1] = 1
eval('self.'+solid.GetBoolNode().GetRightShape().__class__.__name__)(solid.GetBoolNode().GetRightShape())
self.shapesCount = self.shapesCount + 1
self.writer.addUnion(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]),
solid.GetBoolNode().GetLeftShape().GetName()+'_'+str(libPyROOT.AddressOf(solid.GetBoolNode().GetLeftShape())[0]),
solid.GetBoolNode().GetLeftMatrix().GetTranslation(),
lrot,
solid.GetBoolNode().GetRightShape().GetName()+'_'+str(libPyROOT.AddressOf(solid.GetBoolNode().GetRightShape())[0]),
solid.GetBoolNode().GetRightMatrix().GetTranslation(),
rrot)
def TGeoIntersection(self, solid):
lrot = self.rotXYZ(solid.GetBoolNode().GetLeftMatrix().Inverse().GetRotationMatrix())
rrot = self.rotXYZ(solid.GetBoolNode().GetRightMatrix().Inverse().GetRotationMatrix())
if ([solid.GetBoolNode().GetLeftShape(), 0]) in self.solList:
self.solList[self.solList.index([solid.GetBoolNode().GetLeftShape(), 0])][1] = 1
eval('self.'+solid.GetBoolNode().GetLeftShape().__class__.__name__)(solid.GetBoolNode().GetLeftShape())
self.shapesCount = self.shapesCount + 1
if ([solid.GetBoolNode().GetRightShape(), 0]) in self.solList:
self.solList[self.solList.index([solid.GetBoolNode().GetRightShape(), 0])][1] = 1
eval('self.'+solid.GetBoolNode().GetRightShape().__class__.__name__)(solid.GetBoolNode().GetRightShape())
self.shapesCount = self.shapesCount + 1
self.writer.addIntersection(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]),
solid.GetBoolNode().GetLeftShape().GetName()+'_'+str(libPyROOT.AddressOf(solid.GetBoolNode().GetLeftShape())[0]),
solid.GetBoolNode().GetLeftMatrix().GetTranslation(),
lrot,
solid.GetBoolNode().GetRightShape().GetName()+'_'+str(libPyROOT.AddressOf(solid.GetBoolNode().GetRightShape())[0]),
solid.GetBoolNode().GetRightMatrix().GetTranslation(),
rrot)
def TGeoSubtraction(self, solid):
lrot = self.rotXYZ(solid.GetBoolNode().GetLeftMatrix().Inverse().GetRotationMatrix())
rrot = self.rotXYZ(solid.GetBoolNode().GetRightMatrix().Inverse().GetRotationMatrix())
if ([solid.GetBoolNode().GetLeftShape(), 0]) in self.solList:
self.solList[self.solList.index([solid.GetBoolNode().GetLeftShape(), 0])][1] = 1
eval('self.'+solid.GetBoolNode().GetLeftShape().__class__.__name__)(solid.GetBoolNode().GetLeftShape())
self.shapesCount = self.shapesCount + 1
if ([solid.GetBoolNode().GetRightShape(), 0]) in self.solList:
self.solList[self.solList.index([solid.GetBoolNode().GetRightShape(), 0])][1] = 1
eval('self.'+solid.GetBoolNode().GetRightShape().__class__.__name__)(solid.GetBoolNode().GetRightShape())
self.shapesCount = self.shapesCount + 1
self.writer.addSubtraction(self.genName(solid.GetName())+'_'+str(libPyROOT.AddressOf(solid)[0]),
solid.GetBoolNode().GetLeftShape().GetName()+'_'+str(libPyROOT.AddressOf(solid.GetBoolNode().GetLeftShape())[0]),
solid.GetBoolNode().GetLeftMatrix().GetTranslation(),
lrot,
solid.GetBoolNode().GetRightShape().GetName()+'_'+str(libPyROOT.AddressOf(solid.GetBoolNode().GetRightShape())[0]),
solid.GetBoolNode().GetRightMatrix().GetTranslation(),
rrot)
def TGeoCompositeShape(self, solid):
eval('self.'+solid.GetBoolNode().__class__.__name__)(solid)
    def dumpMaterials(self, matlist):
        """Write every material in *matlist* to the GDML writer; mixtures also
        register each constituent element, once per file."""
        print 'Info in <TPython::Exec>: Found ', matlist.GetSize(),' materials'
        for mat in matlist:
            if not mat.IsMixture():
                self.writer.addMaterial(self.genName(mat.GetName()), mat.GetA(), mat.GetZ(), mat.GetDensity())
            else:
                # Map element name -> weight fraction for this mixture.
                elems = {}
                for index in range(mat.GetNelements()):
                    elems[mat.GetElement(index).GetName()] = mat.GetWmixt()[index]
                    el = mat.GetElement(index)
                    if el not in self.elements:
                        # Emit each element definition only once.
                        self.elements.append(el)
                        self.writer.addElement(mat.GetElement(index).GetTitle(), mat.GetElement(index).GetName(), mat.GetZmixt()[index], mat.GetAmixt()[index])
                self.writer.addMixture(self.genName(mat.GetName()), mat.GetDensity(), elems)
def dumpSolids(self, shapelist):
print 'Info in <TPython::Exec>: Found ', shapelist.GetEntries(), ' shapes'
for shape in shapelist:
self.solList.append([shape, 0])
for sol in self.solList:
if sol[1] == 0:
sol[1] = 1
#print eval('self.'+sol[0].__class__.__name__)(sol[0])
eval('self.'+sol[0].__class__.__name__)(sol[0])
self.shapesCount = self.shapesCount + 1
print 'Info in <TPython::Exec>: Dumped ', self.shapesCount, ' shapes'
    def orderVolumes(self, volume):
        """Depth-first walk that appends a volume to self.sortedVols only once
        every one of its placements (pre-counted in volsUseCount by getNodes)
        has been visited, so after reversal mothers precede daughters.
        """
        index = str(volume.GetNumber())+"_"+str(libPyROOT.AddressOf(volume)[0])
        daughters = volume.GetNodes()
        if len(self.sortedVols)<len(self.vols) and self.volsUseCount[index]>0:
            # Placed volume: consume one use; when the count hits zero every
            # placement has been seen and the volume can be emitted.
            self.volsUseCount[index] = self.volsUseCount[index]-1
            if self.volsUseCount[index]==0:
                self.sortedVols.append(volume)
            if daughters:
                for node in daughters:
                    self.orderVolumes(node.GetVolume())
                    self.nodeCount = self.nodeCount+1
                    if self.nodeCount%10000==0:
                        print '[FIRST STAGE] Node count: ', self.nodeCount
        elif len(self.sortedVols)<len(self.volsUseCount) and self.volsUseCount[index]==0:
            # Unplaced volume (e.g. the top volume): emit it directly.
            self.sortedVols.append(volume)
            if daughters:
                for node in daughters:
                    self.orderVolumes(node.GetVolume())
                    self.nodeCount = self.nodeCount+1
                    if self.nodeCount%10000==0:
                        print '[FIRST STAGE] Node count: ', self.nodeCount
    def getNodes(self, volume):
        """Recursively collect every placement node under *volume* into
        self.nodes, incrementing volsUseCount once per placement."""
        nd = volume.GetNdaughters()
        if nd:
            for i in range(nd):
                currentNode = volume.GetNode(i)
                nextVol = currentNode.GetVolume()
                # Key must mirror the one built in orderVolumes.
                index = str(nextVol.GetNumber())+"_"+str(libPyROOT.AddressOf(nextVol)[0])
                self.volsUseCount[index] = self.volsUseCount[index]+1
                self.nodes.append(currentNode)
                self.getNodes(nextVol)
                self.nodeCount = self.nodeCount+1
                if self.nodeCount%10000==0:
                    print '[ZEROTH STAGE] Analysing node: ', self.nodeCount
def examineVol2(self, volume): #use with geometries containing many volumes
print ''
print '[RETRIEVING VOLUME LIST]'
self.bvols = geomgr.GetListOfVolumes()
print ''
print '[INITIALISING VOLUME USE COUNT]'
for vol in self.bvols:
self.vols.append(vol)
self.volsUseCount[str(vol.GetNumber())+"_"+str(libPyROOT.AddressOf(vol)[0])]=0
print ''
print '[CALCULATING VOLUME USE COUNT]'
self.nodeCount = 0
self.getNodes(volume)
print ''
print '[ORDERING VOLUMES]'
self.nodeCount = 0
self.orderVolumes(volume)
print ''
print '[DUMPING GEOMETRY TREE]'
self.sortedVols.reverse()
self.nodeCount = 0
self.dumpGeoTree()
print ''
print '[FINISHED!]'
print ''
    def examineVol(self, volume): #use with geometries containing very few volumes and many nodes
        """Recursively write `volume` and its daughters to the GDML writer,
        depth-first, emitting positions/rotations for each placement and
        handling reflection matrices via synthetic 'reflectedSolid' shapes.
        """
        daughters = []
        if volume.GetNodes():
            for node in volume.GetNodes():
                subvol = node.GetVolume()
                #if bit not set, set and save primitive
                if not subvol.TestAttBit(524288): #value referring to TGeoAtt::kSavePrimitiveAtt (1 << 19)
                    subvol.SetAttBit(524288)
                    self.vols.append(subvol)
                    self.examineVol(subvol)
                # Placement names embed the memory addresses of both volumes
                # to keep them unique across repeated placements.
                name = node.GetName()+str(libPyROOT.AddressOf(subvol)[0])+'in'+volume.GetName()+str(libPyROOT.AddressOf(volume)[0])
                pos = node.GetMatrix().GetTranslation()
                self.writer.addPosition(name+'pos', pos[0], pos[1], pos[2])
                r = self.rotXYZ(node.GetMatrix().GetRotationMatrix())
                rotname = ''
                # Only emit a rotation element for non-identity rotations.
                if r[0]!=0.0 or r[1]!=0.0 or r[2]!=0.0:
                    self.writer.addRotation(name+'rot', r[0], r[1], r[2])
                    rotname = name+'rot'
                reflection = node.GetMatrix().IsReflection()#check if this daughter has a reflection matrix
                if reflection:
                    rotmat = node.GetMatrix().GetRotationMatrix()
                    #add new 'reflectedSolid' shape to solids
                    self.writer.addReflSolid('refl_'+node.GetVolume().GetShape().GetName()+'_'+str(libPyROOT.AddressOf(node.GetVolume().GetShape())[0]), node.GetVolume().GetShape().GetName()+'_'+str(libPyROOT.AddressOf(node.GetVolume().GetShape())[0]), 0, 0, 0, rotmat[0], rotmat[4], rotmat[8], 0, 0, 0)
                    #add new volume with correct solidref to the new reflectedSolid
                    emptyd = []
                    self.writer.addVolume('refl_'+node.GetVolume().GetName()+'_'+str(libPyROOT.AddressOf(node.GetVolume())[0]), 'refl_'+node.GetVolume().GetShape().GetName()+'_'+str(libPyROOT.AddressOf(node.GetVolume().GetShape())[0]), self.genName(node.GetVolume().GetMaterial().GetName()), emptyd)
                    #add new volume as volumeref to this physvol
                    daughters.append( ('refl_'+node.GetVolume().GetName()+'_'+str(libPyROOT.AddressOf(node.GetVolume())[0]), name+'pos', rotname) )
                else:
                    daughters.append( (node.GetVolume().GetName()+'_'+str(libPyROOT.AddressOf(node.GetVolume())[0]), name+'pos', rotname) )
        # The top volume keeps its plain name; all others get the address
        # suffix. Assemblies have no shape/material and are written as such.
        if volume.IsTopVolume():
            if not volume.IsAssembly():
                self.writer.addVolume(volume.GetName(), volume.GetShape().GetName()+'_'+str(libPyROOT.AddressOf(volume.GetShape())[0]), self.genName(volume.GetMaterial().GetName()), daughters)
            else:
                self.writer.addAssembly(volume.GetName(), daughters)
        else:
            if not volume.IsAssembly():
                self.writer.addVolume(volume.GetName()+'_'+str(libPyROOT.AddressOf(volume)[0]), volume.GetShape().GetName()+'_'+str(libPyROOT.AddressOf(volume.GetShape())[0]), self.genName(volume.GetMaterial().GetName()), daughters)
            else:
                self.writer.addAssembly(volume.GetName()+'_'+str(libPyROOT.AddressOf(volume)[0]), daughters)
    def dumpGeoTree(self):
        """Second stage: write every volume in self.sortedVols (already
        ordered leaf-first by examineVol2) to the GDML writer, together
        with the position/rotation of each of its daughter placements.

        NOTE(review): unlike examineVol, this path does not handle
        reflection matrices — presumably orderVolumes/getNodes geometries
        are assumed reflection-free; confirm before relying on it.
        """
        for volume in self.sortedVols:
            nd = volume.GetNdaughters()
            daughters = []
            if nd:
                for i in range(nd):
                    node = volume.GetNode(i)
                    name = node.GetName()+'in'+volume.GetName()
                    pos = node.GetMatrix().GetTranslation()
                    self.writer.addPosition(name+'pos', pos[0], pos[1], pos[2])
                    r = self.rotXYZ(node.GetMatrix().GetRotationMatrix())
                    rotname = ''
                    # Only emit a rotation element for non-identity rotations.
                    if r[0]!=0.0 or r[1]!=0.0 or r[2]!=0.0:
                        self.writer.addRotation(name+'rot', r[0], r[1], r[2])
                        rotname = name+'rot'
                    daughters.append( (node.GetVolume().GetName()+'_'+str(libPyROOT.AddressOf(node.GetVolume())[0]), name+'pos', rotname) )
                    self.nodeCount = self.nodeCount+1
                    if self.nodeCount%100==0:
                        print '[SECOND STAGE] Volume Count: ', self.nodeCount, node.GetVolume().GetName()+'_'+str(libPyROOT.AddressOf(node.GetVolume())[0])
            # Top volume keeps its plain name; all others get the address
            # suffix, matching the names used in the daughters tuples above.
            if volume.IsTopVolume():
                if not volume.IsAssembly():
                    self.writer.addVolume(volume.GetName(), volume.GetShape().GetName()+'_'+str(libPyROOT.AddressOf(volume.GetShape())[0]), self.genName(volume.GetMaterial().GetName()), daughters)
                else:
                    self.writer.addAssembly(volume.GetName(), daughters)
            else:
                if not volume.IsAssembly():
                    self.writer.addVolume(volume.GetName()+'_'+str(libPyROOT.AddressOf(volume)[0]), volume.GetShape().GetName()+'_'+str(libPyROOT.AddressOf(volume.GetShape())[0]), self.genName(volume.GetMaterial().GetName()), daughters)
                else:
                    self.writer.addAssembly(volume.GetName()+'_'+str(libPyROOT.AddressOf(volume)[0]), daughters)
| lgpl-2.1 |
michalskalski/puppet-eseries-old | acceptancetests/netapp_restlibs.py | 1 | 7689 | from pprint import pprint
import time
import json
import contextlib
import requests
from netapp_config import base_url, session
# Resource paths: URL templates (relative to base_url) for every REST
# endpoint, keyed by a short object-type name. Placeholders such as
# {array_id} and {id} are filled in via str.format by the generic_* helpers.
resources = {
    # storage systems
    'storage-systems': '/storage-systems',
    'storage-system': "/storage-systems/{array_id}",
    # pools and drives
    'pools': "/storage-systems/{array_id}/storage-pools",
    'pool': "/storage-systems/{array_id}/storage-pools/{id}",
    'drives': "/storage-systems/{array_id}/drives",
    'drive': "/storage-systems/{array_id}/drives/{id}",
    # volumes and snapshots
    'volumes': "/storage-systems/{array_id}/volumes",
    'volume': "/storage-systems/{array_id}/volumes/{id}",
    'thin_volumes' : "/storage-systems/{array_id}/thin-volumes",
    'thin_volume' : "/storage-systems/{array_id}/thin-volume/{id}",
    'snapshot_groups': "/storage-systems/{array_id}/snapshot-groups",
    'snapshot_group': "/storage-systems/{array_id}/snapshot-groups/{id}",
    'snapshot_views': "/storage-systems/{array_id}/snapshot-volumes",
    'snapshot_view': "/storage-systems/{array_id}/snapshot-volumes/{id}",
    'snapshots': "/storage-systems/{array_id}/snapshot-images",
    'snapshot': "/storage-systems/{array_id}/snapshot-images/{id}",
    # volume copy jobs
    'volume_copies' : "/storage-systems/{array_id}/volume-copy-jobs",
    'volume_copy' : "/storage-systems/{array_id}/volume-copy-jobs/{id}",
    'volume_copy_control' : "/storage-systems/{array_id}/volume-copy-jobs-control/{id}",
    # statistics
    'analysed_volume_statistics': "/storage-systems/{array_id}/analysed-volume-statistics",
    'volume_statistics': "/storage-systems/{array_id}/volume-statistics",
    'volume_statistic' : '/storage-systems/{array_id}/volume-statistics/{id}',
    'analysed-drive_statistics': "/storage-systems/{array_id}/analysed-drive-statistics",
    'drive_statistics': "/storage-systems/{array_id}/drive-statistics",
    'drive_statistic' : '/storage-systems/{array_id}/drive-statistics/{id}',
    # host mappings
    'volume_mappings': "/storage-systems/{array_id}/volume-mappings",
    'volume_mapping': "/storage-systems/{array_id}/volume-mappings/{id}",
    'host_groups': '/storage-systems/{array_id}/host-groups',
    'host_group': '/storage-systems/{array_id}/host-groups/{id}',
    'hosts': '/storage-systems/{array_id}/hosts',
    'host': '/storage-systems/{array_id}/hosts/{id}',
    'host_ports' : '/storage-systems/{array_id}/host-ports',
    'host_port' : '/storage-systems/{array_id}/host-ports/{id}',
    'host_types' : '/storage-systems/{array_id}/host-types',
    # events and hardware
    'events' : "/storage-systems/{array_id}/mel-events?count=8192",
    'critical_events' : "/storage-systems/{array_id}/mel-events?critical=true",
    'hardware' : "/storage-systems/{array_id}/hardware-inventory/",
    'graph' : "/storage-systems/{array_id}/graph/",
    'symbol': "/storage-systems/{array_id}/symbol/{command}/",
    # consistency groups
    "cgroups" : "/storage-systems/{array_id}/consistency-groups",
    "cgroup" : "/storage-systems/{array_id}/consistency-groups/{id}",
    "cgView" : "/storage-systems/{array_id}/consistency-groups/{cgId}/views/{viewId}",
    "cgMembers" : "/storage-systems/{array_id}/consistency-groups/{cgId}/member-volumes",
    "cgMember" : "/storage-systems/{array_id}/consistency-groups/{cgId}/member-volumes/{volumeRef}",
    "cgSnapshots" : "/storage-systems/{array_id}/consistency-groups/{cgId}/snapshots",
    "cgSnapshot" : "/storage-systems/{array_id}/consistency-groups/{cgId}/snapshots/{sequenceNumber}",
    "cgRollback" : "/storage-systems/{array_id}/consistency-groups/{cgId}/snapshots/{sequenceNumber}/rollback",
    "cgViews" : "/storage-systems/{array_id}/consistency-groups/{cgId}/views",
    "global_events" : "/events",
    # async mirroring
    "async-mirrors" : "/storage-systems/{array_id}/async-mirrors",
    "async-mirror" : "/storage-systems/{array_id}/async-mirrors/{id}",
    "async-mirror-pairs" : "/storage-systems/{array_id}/async-mirrors/{mirror_id}/pairs",
    "async-mirror-pair" : "/storage-systems/{array_id}/async-mirrors/{mirror_id}/pairs/{id}",
    "async-mirror-progress" : "/storage-systems/{array_id}/async-mirrors/{id}/progress",
    "async-mirror-resume" : "/storage-systems/{array_id}/async-mirrors/{id}/resume",
    "async-mirror-role" : "/storage-systems/{array_id}/async-mirrors/{id}/role",
    "async-mirror-suspend" : "/storage-systems/{array_id}/async-mirrors/{id}/suspend",
    "async-mirror-sync" : "/storage-systems/{array_id}/async-mirrors/{id}/sync",
    "async-mirror-test" : "/storage-systems/{array_id}/async-mirrors/{id}/test",
    "async-mirror-targets" : "/storage-systems/{array_id}/async-mirrors/arvm-arrays",
    # network configuration
    "ethernet-interfaces":"/storage-systems/{array_id}/configuration/ethernet-interfaces",
}
class ArrayInaccessibleException(Exception):
    """Raised when a storage array cannot be contacted after registration.

    The human-readable reason is kept on ``message`` for callers that
    inspect it directly.
    """

    def __init__(self, message):
        # Fix: the original called super(Exception, ClassName).__init__(self, message),
        # a misuse of unbound super that resolved to BaseException.__init__.
        # Use the canonical bound super call instead.
        super(ArrayInaccessibleException, self).__init__(message)
        self.message = message
class RestException(Exception):
    """HTTP-level error from the REST API.

    Carries the HTTP ``status_code`` and the decoded error body (or ``""``
    when the body was not valid JSON) as ``message``.
    """

    def __init__(self, status_code, message):
        # Fix: the original called super(Exception, ClassName).__init__(self, message),
        # a misuse of unbound super that resolved to BaseException.__init__.
        # Use the canonical bound super call instead.
        super(RestException, self).__init__(message)
        self.message = message
        self.status_code = status_code

    def __str__(self):
        return "Bad status '{}': {}".format(self.status_code, self.message)
@contextlib.contextmanager
def array_controller(addresses, id=None, wwn=None, password=None, retries=10):
    """Context manager that registers a storage array and yields it.

    Registers the array, then polls its status every 5 seconds (up to
    ``retries`` times) until it leaves the 'neverContacted' state.

    :param addresses: controller addresses used to register the array
    :param id: optional explicit array id
    :param wwn: optional world-wide name
    :param password: optional array password
    :param retries: number of status polls before giving up
    :raises ArrayInaccessibleException: if the array is still
        'neverContacted' or reports 'inaccessible' after all retries.
    """
    postData = {'controllerAddresses': addresses, 'wwn': wwn, 'password': password, "id": id}
    array = generic_post('storage-systems', postData)
    # Fix: initialise status so that retries <= 0 no longer raises a
    # NameError; it falls through to ArrayInaccessibleException instead.
    status = 'neverContacted'
    for _ in range(retries):
        array = generic_get('storage-system', array_id=array['id'])
        status = array['status']
        if status == 'neverContacted':
            time.sleep(5)
        else:
            break
    if status == 'neverContacted' or status == 'inaccessible':
        raise ArrayInaccessibleException("Unable to access array {}!".format(array['id']))
    # The original wrapped this yield in a no-op "except Exception: raise";
    # removed, since it neither cleaned up nor altered propagation.
    yield array
def generic_get(object_type, query_string=None, **params):
    """Issue a GET request for the given object_type.

    :param object_type: key into the ``resources`` URL-template table
    :param query_string: optional dict of query-string arguments
    :param params: keyword arguments substituted into the URL template
    :returns: decoded JSON response (or None on 204)
    """
    endpoint = resources[object_type].format(**params)
    response = session.get(base_url + endpoint, params=query_string)
    return handleResponse(response)
def generic_delete(object_type, query_string=None, **params):
    """Issue a DELETE request for the given object_type.

    :param object_type: key into the ``resources`` URL-template table
    :param query_string: optional dict of query-string arguments
    :param params: keyword arguments substituted into the URL template
    :returns: decoded JSON response (or None on 204)
    """
    endpoint = resources[object_type].format(**params)
    response = session.delete(base_url + endpoint, params=query_string)
    return handleResponse(response)
def generic_post(object_type, data, query_string=None, **params):
    """Issue a POST request for the given object_type.

    :param object_type: key into the ``resources`` URL-template table
    :param data: dict describing the object to create, sent as JSON
    :param query_string: optional dict of query-string arguments
    :param params: keyword arguments substituted into the URL template
    :returns: decoded JSON response (or None on 204)
    """
    endpoint = resources[object_type].format(**params)
    payload = json.dumps(data)
    response = session.post(base_url + endpoint, data=payload, params=query_string)
    return handleResponse(response)
def handleResponse(req):
    """Turn an HTTP response into decoded JSON.

    Raises RestException for any status >= 300 (with the JSON body as the
    message when it decodes, "" otherwise); returns None for 204 No
    Content; otherwise returns the decoded JSON body.
    """
    status = req.status_code
    if status >= 300:
        try:
            body = req.json()
        except ValueError:
            # Error response without a JSON body.
            raise RestException(status, "")
        raise RestException(status, body)
    if status == 204:
        return None
    return req.json()
| apache-2.0 |
mikedanese/test-infra | experiment/flakedetector.py | 5 | 2853 | #!/usr/bin/env python3
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Counts the number of flakes in PRs using data from prow.
A flake is counted if a job passes and fails for the same pull commit. This
isn't a perfect signal, since something might have happened on master that
makes it flake, but I think it's good enough.
There will also be false negatives: flakes that don't show up here because the
PR author changed the PR. Still, this is a good signal.
The chance of hitting a flake is calculated assuming they are uncorrelated,
which should be a pretty good assumption unless we have a short outage. For
instance, if all tests start to fail for just a few minutes, then a whole
bunch of tests will fail at once, then succeed on a rerun. This will cause the
calculated chance of hitting a flake to be overestimated.
"""
import operator
import requests
def main():
    """Run flake detector.

    Fetches recent job results from prow, groups kubernetes/kubernetes
    presubmit runs by (job, pull commit), counts a flake whenever the same
    commit both passed and failed a job, and prints per-job flake rates
    plus the combined chance a PR hits at least one flake.
    """
    res = requests.get('https://prow.k8s.io/data.js')
    data = res.json()
    jobs = {}
    for job in data:
        if job['type'] != 'presubmit':
            continue
        if job['repo'] != 'kubernetes/kubernetes':
            continue
        if job['state'] != 'success' and job['state'] != 'failure':
            continue
        # jobs[job name][pull commit] -> list of terminal states
        jobs.setdefault(job['job'], {}).setdefault(job['pull_sha'], []).append(job['state'])

    job_commits = {}
    job_flakes = {}
    for job, commits in jobs.items():
        job_commits[job] = len(commits)
        # A flake: the same commit both passed and failed the job.
        job_flakes[job] = sum(
            1 for results in commits.values()
            if 'success' in results and 'failure' in results)

    # Fix: this file declares python3 but used Python 2 print statements,
    # which are a SyntaxError under Python 3.
    print('Certain flakes from the last day:')
    total_success_chance = 1.0
    for job, flakes in sorted(job_flakes.items(), key=operator.itemgetter(1), reverse=True):
        # Skip jobs with too few runs to give a meaningful rate.
        if job_commits[job] < 10:
            continue
        fail_chance = flakes / job_commits[job]
        # Assumes flakes are uncorrelated across jobs (see module docstring).
        total_success_chance *= (1.0 - fail_chance)
        print('{}/{}\t({:.0f}%)\t{}'.format(flakes, job_commits[job], 100 * fail_chance, job))
    print('Chance that a PR hits a flake: {:.0f}%'.format(100 * (1 - total_success_chance)))


if __name__ == '__main__':
    main()
| apache-2.0 |
dragondjf/QtPython | python/modbus_tk/simulator.py | 1 | 12645 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Modbus TestKit: Implementation of Modbus protocol in python
(C)2009 - Luc Jean - luc.jean@gmail.com
(C)2009 - Apidev - http://www.apidev.fr
This is distributed under GNU LGPL license, see license.txt
The modbus_tk simulator is a console application which is running a server with TCP and RTU communication
It is possible to interact with the server from the command line or from a RPC (Remote Process Call)
"""
from __future__ import print_function
import ctypes
import os
import sys
import select
import serial
import threading
import time
import modbus_tk
from modbus_tk import hooks
from modbus_tk import modbus
from modbus_tk import modbus_tcp
from modbus_tk import modbus_rtu
if modbus_tk.utils.PY2:
import Queue as queue
import SocketServer
else:
import queue
import socketserver as SocketServer
# add logging capability
LOGGER = modbus_tk.utils.create_logger(name="console", record_format="%(message)s")
# The communication between the server and the user interfaces (console or rpc) are done through queues
# command received from the interfaces
INPUT_QUEUE = queue.Queue()
# response to be sent back by the interfaces
OUTPUT_QUEUE = queue.Queue()
class CompositeServer(modbus.Server):
    """Aggregate several modbus servers that share a single databank."""

    def __init__(self, list_of_server_classes, list_of_server_args, databank=None):
        """Instantiate one child server per (class, args) pair.

        Entries that are not modbus.Server subclasses are silently skipped.
        Every child is wired to this composite's databank.
        """
        super(CompositeServer, self).__init__(databank)
        self._servers = []
        for server_class, server_args in zip(list_of_server_classes, list_of_server_args):
            if not issubclass(server_class, modbus.Server):
                continue
            self._servers.append(server_class(*server_args, **{"databank": self.get_db()}))

    def set_verbose(self, verbose):
        """if verbose is true the sent and received packets will be logged"""
        for server in self._servers:
            server.set_verbose(verbose)

    def _make_thread(self):
        """should initialize the main thread of the server. You don't need it here"""
        pass

    def _make_query(self):
        """Returns an instance of a Query subclass implementing the MAC layer protocol"""
        raise NotImplementedError()

    def start(self):
        """Start the server. It will handle request"""
        for server in self._servers:
            server.start()

    def stop(self):
        """stop the server. It doesn't handle request anymore"""
        for server in self._servers:
            server.stop()
class RpcHandler(SocketServer.BaseRequestHandler):
    """An instance of this class is created every time an RPC call is received by the server"""

    def handle(self):
        """This function is called automatically by the SocketServer"""
        # self.request is the TCP socket connected to the client
        # read the incoming command
        request = self.request.recv(1024).strip()
        # write to the queue waiting to be processed by the server
        INPUT_QUEUE.put(request)
        # wait for the server answer in the output queue
        # (raises queue.Empty if the server does not answer within 5s)
        response = OUTPUT_QUEUE.get(timeout=5.0)
        # send back the answer
        self.request.send(response)
class RpcInterface(threading.Thread):
    """Manage RPC call over TCP/IP thanks to the SocketServer module"""

    def __init__(self):
        """Constructor"""
        super(RpcInterface, self).__init__()
        # listen on every interface, port 2711; each incoming connection is
        # dispatched to a fresh RpcHandler
        self.rpc_server = SocketServer.TCPServer(("", 2711), RpcHandler)

    def run(self):
        """run the server and wait that it returns"""
        # 0.5s poll interval so shutdown() is honoured promptly
        self.rpc_server.serve_forever(0.5)

    def close(self):
        """force the socket server to exit"""
        try:
            self.rpc_server.shutdown()
            self.join(1.0)
        except Exception:
            # best-effort teardown: never let a cleanup error propagate
            LOGGER.warning("An error occurred while closing RPC interface")
class ConsoleInterface(threading.Thread):
    """Manage user actions from the console"""

    def __init__(self):
        """constructor: initialize communication with the console"""
        super(ConsoleInterface, self).__init__()
        self.inq = INPUT_QUEUE
        self.outq = OUTPUT_QUEUE
        if os.name == "nt":
            # Windows: wait on the process's stdin handle via the Win32 API.
            # 0xfffffff5 is -11 as an unsigned DWORD, i.e. STD_INPUT_HANDLE.
            ctypes.windll.Kernel32.GetStdHandle.restype = ctypes.c_ulong
            self.console_handle = ctypes.windll.Kernel32.GetStdHandle(ctypes.c_ulong(0xfffffff5))
            ctypes.windll.Kernel32.WaitForSingleObject.restype = ctypes.c_ulong
        elif os.name == "posix":
            # select already imported
            pass
        else:
            raise Exception("%s platform is not supported yet" % os.name)
        # _go stays set while the app is running; close() clears it
        self._go = threading.Event()
        self._go.set()

    def _check_console_input(self):
        """test if there is something to read on the console"""
        if os.name == "nt":
            # 0 == WAIT_OBJECT_0: the handle is signalled (input available);
            # times out after 500ms otherwise
            if 0 == ctypes.windll.Kernel32.WaitForSingleObject(self.console_handle, 500):
                return True
        elif os.name == "posix":
            # poll stdin with a 0.5s timeout
            (inputready, abcd, efgh) = select.select([sys.stdin], [], [], 0.5)
            if len(inputready) > 0:
                return True
        else:
            raise Exception("%s platform is not supported yet" % os.name)
        return False

    def run(self):
        """read from the console, transfer to the server and write the answer"""
        while self._go.isSet(): #while app is running
            if self._check_console_input(): #if something to read on the console
                cmd = sys.stdin.readline() #read it
                self.inq.put(cmd) #dispatch it tpo the server
                response = self.outq.get(timeout=2.0) #wait for an answer
                sys.stdout.write(response) #write the answer on the console

    def close(self):
        """terminates the thread"""
        self._go.clear()
        self.join(1.0)
class Simulator(object):
    """The main class of the app in charge of running everything.

    Owns the modbus server(s), the console and RPC front-ends, and a
    dispatch table mapping command keywords to handler methods. Commands
    arrive on INPUT_QUEUE; textual answers are posted to OUTPUT_QUEUE.
    """

    def __init__(self, server=None):
        """Constructor"""
        if server is None:
            # default: serve modbus over both the first serial port (RTU)
            # and TCP, sharing a single databank
            self.server = CompositeServer([modbus_rtu.RtuServer, modbus_tcp.TcpServer], [(serial.Serial(0),), ()])
        else:
            self.server = server
        self.rpc = RpcInterface()
        self.console = ConsoleInterface()
        self.inq, self.outq = INPUT_QUEUE, OUTPUT_QUEUE
        # functions that may be installed as hooks, keyed by name
        # (see declare_hook / _do_install_hook)
        self._hooks_fct = {}
        # command dispatch table: command keyword -> handler method
        self.cmds = {
            "add_slave": self._do_add_slave,
            "has_slave": self._do_has_slave,
            "remove_slave": self._do_remove_slave,
            "remove_all_slaves": self._do_remove_all_slaves,
            "add_block": self._do_add_block,
            "remove_block": self._do_remove_block,
            "remove_all_blocks": self._do_remove_all_blocks,
            "set_values": self._do_set_values,
            "get_values": self._do_get_values,
            "install_hook": self._do_install_hook,
            "uninstall_hook": self._do_uninstall_hook,
            "set_verbose": self._do_set_verbose,
        }

    def add_command(self, name, fct):
        """add a custom command"""
        self.cmds[name] = fct

    def start(self):
        """run the servers"""
        self.server.start()
        self.console.start()
        self.rpc.start()
        LOGGER.info("modbus_tk.simulator is running...")
        # blocks until a 'quit' command is received
        self._handle()

    def declare_hook(self, fct_name, fct):
        """declare a hook function by its name. It must be installed by an install hook command"""
        self._hooks_fct[fct_name] = fct

    def _tuple_to_str(self, the_tuple):
        """convert a tuple to a string"""
        ret = ""
        for item in the_tuple:
            ret += (" " + str(item))
        # drop the leading space
        return ret[1:]

    def _do_add_slave(self, args):
        """execute the add_slave command"""
        slave_id = int(args[1])
        self.server.add_slave(slave_id)
        return "{0}".format(slave_id)

    def _do_has_slave(self, args):
        """execute the has_slave command"""
        slave_id = int(args[1])
        try:
            self.server.get_slave(slave_id)
        except Exception:
            return "0"
        return "1"

    def _do_remove_slave(self, args):
        """execute the remove_slave command"""
        slave_id = int(args[1])
        self.server.remove_slave(slave_id)
        return ""

    def _do_remove_all_slaves(self, args):
        """execute the remove_slave command"""
        self.server.remove_all_slaves()
        return ""

    def _do_add_block(self, args):
        """execute the add_block command"""
        slave_id = int(args[1])
        name = args[2]
        block_type = int(args[3])
        starting_address = int(args[4])
        length = int(args[5])
        slave = self.server.get_slave(slave_id)
        slave.add_block(name, block_type, starting_address, length)
        return name

    def _do_remove_block(self, args):
        """execute the remove_block command"""
        slave_id = int(args[1])
        name = args[2]
        slave = self.server.get_slave(slave_id)
        slave.remove_block(name)

    def _do_remove_all_blocks(self, args):
        """execute the remove_all_blocks command"""
        slave_id = int(args[1])
        slave = self.server.get_slave(slave_id)
        slave.remove_all_blocks()

    def _do_set_values(self, args):
        """execute the set_values command"""
        slave_id = int(args[1])
        name = args[2]
        address = int(args[3])
        values = []
        for val in args[4:]:
            values.append(int(val))
        slave = self.server.get_slave(slave_id)
        slave.set_values(name, address, values)
        # read the values back so the answer reflects what is stored
        values = slave.get_values(name, address, len(values))
        return self._tuple_to_str(values)

    def _do_get_values(self, args):
        """execute the get_values command"""
        slave_id = int(args[1])
        name = args[2]
        address = int(args[3])
        length = int(args[4])
        slave = self.server.get_slave(slave_id)
        values = slave.get_values(name, address, length)
        return self._tuple_to_str(values)

    def _do_install_hook(self, args):
        """install a function as a hook"""
        hook_name = args[1]
        fct_name = args[2]
        # the function must have been registered via declare_hook first
        hooks.install_hook(hook_name, self._hooks_fct[fct_name])

    def _do_uninstall_hook(self, args):
        """
        uninstall a function as a hook.
        If no function is given, uninstall all functions
        """
        hook_name = args[1]
        try:
            hooks.uninstall_hook(hook_name)
        except KeyError as exception:
            LOGGER.error(str(exception))

    def _do_set_verbose(self, args):
        """change the verbosity of the server"""
        verbose = int(args[1])
        self.server.set_verbose(verbose)
        return "%d" % verbose

    def _handle(self):
        """almost-for-ever loop in charge of listening for command and executing it"""
        while True:
            cmd = self.inq.get()
            # commands are whitespace-separated: <keyword> [args...]
            args = cmd.strip('\r\n').split(' ')
            if cmd.find('quit') == 0:
                self.outq.put('bye-bye\r\n')
                break
            elif args[0] in self.cmds:
                try:
                    answer = self.cmds[args[0]](args)
                    self.outq.put("%s done: %s\r\n" % (args[0], answer))
                except Exception as msg:
                    self.outq.put("%s error: %s\r\n" % (args[0], msg))
            else:
                self.outq.put("error: unknown command %s\r\n" % (args[0]))

    def close(self):
        """close every server"""
        self.console.close()
        self.rpc.close()
        self.server.stop()
def print_me(args):
    """Example hook callback: report the length of the request packet (args[1])."""
    packet = args[1]
    print("print_me: len = ", len(packet))
def run_simulator():
    """Build a default Simulator, register the example hook and run it
    until the user types 'quit'; always tears the servers down on exit."""
    simulator = Simulator()
    try:
        LOGGER.info("'quit' for closing the server")
        simulator.declare_hook("print_me", print_me)
        # blocks until the 'quit' command is processed
        simulator.start()
    except Exception as exception:
        print(exception)
    finally:
        simulator.close()
        LOGGER.info("modbus_tk.simulator has stopped!")
        # In python 2.5, the SocketServer shutdown is not working Ok
        # The 2 lines below are an ugly temporary workaround
        time.sleep(1.0)
        sys.exit()


if __name__ == "__main__":
    run_simulator()
| gpl-2.0 |
fourthskyinteractive/dolphin | Tools/symbolicate-ppc.py | 132 | 3189 | #!/usr/bin/python
# This filter replace all occurences of JIT_PPC_${address} by a
# corresponding function name JIT_PPC_${symbol} as defined by a .map file.
# TODO, add an option to append the block address (JIT_PPC_${symbol}@${addr})
# Example 1: guest function profiling (excluding host callees)
#
# $ perf record -t $tid
# $ perf script | sed 's/.*cycles: *[0-9a-f]* *//' |
# python Tools/symbolicate-ppc.py ~/.dolphin-emu/Maps/${map}.map |
# rankor -r | head
# 10.05% JIT_Loop (/tmp/perf-15936.map)
# 3.73% [unknown] (/tmp/perf-15936.map)
# 1.91% VideoBackendHardware::Video_GatherPipeBursted (/opt/dolphin-2015-05-06/bin/dolphin-emu)
# 1.39% JIT_PPC_PSMTXConcat (/tmp/perf-15936.map)
# 1.00% JIT_PPC_zz_051754c_ (/tmp/perf-15936.map)
# 0.90% JIT_PPC_zz_051751c_ (/tmp/perf-15936.map)
# 0.71% JIT_PPC_zz_04339d4_ (/tmp/perf-15936.map)
# 0.59% JIT_PPC_zz_05173e0_ (/tmp/perf-15936.map)
# 0.57% JIT_PPC_zz_044141c_ (/tmp/perf-15936.map)
# 0.54% JIT_PPC_zz_01839cc_ (/tmp/perf-15936.map)
# Example 2: guest function profiling (including host callees)
#
# $ perf record --call-graph dwarf -t $tid
# $ perf script | stackcollapse-perf.pl | sed 's/^CPU;//' |
# python Tools/symbolicate-ppc.py ~/.dolphin-emu/Maps/${map}.map |
# perl -pe 's/^([^; ]*).*? ([0-9]+?)$/\1 \2/' | stackcollapse-recursive.pl |
# awk '{printf "%s %s\n", $2, $1}' | sort -rn | head
# 5811 JIT_Loop
# 2396 [unknown]
# 577 JIT_PPC_PSMTXConcat
# 464 JIT_PPC___restore_gpr
# 396 JIT_PPC_zz_0517514_
# 313 JIT_PPC_zz_04339d4_
# 290 JIT_PPC_zz_05173e0_
# 285 JIT_PPC_zz_01839cc_
# 277 JIT_PPC_zz_04335ac_
# 269 JIT_PPC_zz_0420b58_
import re
import sys
stdin = sys.stdin
stdout = sys.stdout
class Symbol:
    """A map-file symbol covering the half-open address range [start, end)."""

    def __init__(self, start, size, name):
        """
        :param start: first address covered by the symbol
        :param size: number of bytes the symbol spans
        :param name: symbol (function) name
        """
        self.start = start
        self.end = start + size
        self.name = name

    def __repr__(self):
        # debugging aid; hex addresses match the .map file representation
        return "Symbol(start={:#x}, end={:#x}, name={!r})".format(
            self.start, self.end, self.name)
# Read a .map file: a line-oriented mapping from (PowerPC) memory addresses
# to function names. Line format: "%08x %08x %08x %i %s"
# (address, size, address, 0, name). Entries are expected pre-sorted.
def read_map(filename):
    """Parse *filename* into a list of Symbol entries, skipping any line
    that does not match the expected map format."""
    pattern = re.compile("^([0-9a-f]{8}) ([0-9a-f]{8}) ([0-9a-f]{8}) ([0-9]*) (.*)$")
    symbols = []
    with open(filename, "r") as f:
        for line in f:
            m = pattern.match(line)
            if m is None:
                continue
            symbols.append(Symbol(int(m.group(1), 16), int(m.group(2), 16), m.group(5)))
    return symbols
map = read_map(sys.argv[1])
# Binary-search the (sorted) symbol map for the symbol containing an address:
def lookup(address):
    """Return the JIT_PPC_-prefixed symbol name covering *address*.

    Falls back to "JIT_PPC_[unknown]" when the address lies outside every
    mapped range.
    """
    lo = 0
    # Fix: the original initialised the upper bound to len(map), which with
    # these inclusive bounds let the midpoint reach len(map) and raise an
    # IndexError for any address above the last mapped range.
    hi = len(map) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        sym = map[mid]
        if address < sym.start:
            hi = mid - 1
        elif address >= sym.end:
            lo = mid + 1
        else:
            return "JIT_PPC_" + sym.name
    return "JIT_PPC_[unknown]"
# Function used to replace given match:
def replace(match):
    # match.group(1) is the hex address captured from a JIT_PPC_<addr> token.
    return lookup(int(match.group(1), 16))
# Process stdin and write to stdout:
for line in stdin:
    # rewrite every JIT_PPC_<hex> token to its symbolicated name
    modline = re.sub('JIT_PPC_([0-9a-f]*)', replace, line)
    stdout.write(modline)
| gpl-2.0 |
ArteliaTelemac/PostTelemac | PostTelemac/meshlayerlibs/pyqtgraph/flowchart/FlowchartTemplate_pyqt5.py | 3 | 2409 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './pyqtgraph/flowchart/FlowchartTemplate.ui'
#
# Created: Wed Mar 26 15:09:28 2014
# by: PyQt5 UI code generator 5.0.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """Auto-generated UI builder (pyuic5 output from FlowchartTemplate.ui).

    Do not edit by hand: regenerating from the .ui file overwrites this
    module (see the warning in the file header).
    """

    def setupUi(self, Form):
        # Build the widget tree with static geometry for the flowchart form.
        Form.setObjectName("Form")
        Form.resize(529, 329)
        self.selInfoWidget = QtWidgets.QWidget(Form)
        self.selInfoWidget.setGeometry(QtCore.QRect(260, 10, 264, 222))
        self.selInfoWidget.setObjectName("selInfoWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.selInfoWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        self.selDescLabel = QtWidgets.QLabel(self.selInfoWidget)
        self.selDescLabel.setText("")
        self.selDescLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
        self.selDescLabel.setWordWrap(True)
        self.selDescLabel.setObjectName("selDescLabel")
        self.gridLayout.addWidget(self.selDescLabel, 0, 0, 1, 1)
        self.selNameLabel = QtWidgets.QLabel(self.selInfoWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.selNameLabel.setFont(font)
        self.selNameLabel.setText("")
        self.selNameLabel.setObjectName("selNameLabel")
        self.gridLayout.addWidget(self.selNameLabel, 0, 1, 1, 1)
        self.selectedTree = DataTreeWidget(self.selInfoWidget)
        self.selectedTree.setObjectName("selectedTree")
        self.selectedTree.headerItem().setText(0, "1")
        self.gridLayout.addWidget(self.selectedTree, 1, 0, 1, 2)
        self.hoverText = QtWidgets.QTextEdit(Form)
        self.hoverText.setGeometry(QtCore.QRect(0, 240, 521, 81))
        self.hoverText.setObjectName("hoverText")
        self.view = FlowchartGraphicsView(Form)
        self.view.setGeometry(QtCore.QRect(0, 0, 256, 192))
        self.view.setObjectName("view")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        # Install translated strings (only the window title here).
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "PyQtGraph"))
from ..widgets.DataTreeWidget import DataTreeWidget
from ..flowchart.FlowchartGraphicsView import FlowchartGraphicsView
| gpl-3.0 |
siovene/easy-thumbnails | easy_thumbnails/get_version.py | 13 | 1911 | from __future__ import unicode_literals
import datetime
import os
import subprocess
def get_version(version=None):
    """Returns a PEP 386-compliant version number from VERSION.

    :param version: optional 5-tuple (major, minor, micro, release, serial)
        where release is one of 'alpha', 'beta', 'rc', 'post' or 'final';
        defaults to easy_thumbnails.VERSION.
    """
    if version is None:
        from easy_thumbnails import VERSION as version
    else:
        assert len(version) == 5
        # Fix: 'post' is a handled release type below but was missing from
        # this assertion, so every explicit post-release tuple failed it.
        assert version[3] in ('alpha', 'beta', 'rc', 'post', 'final')

    # Now build the two parts of the version number:
    # main = X.Y[.Z]
    # sub = .devN - for pre-alpha releases
    #     | .postN - for post releases
    #     | {a|b|c}N - for alpha, beta and rc releases

    parts = 2 if version[2] == 0 else 3
    main = '.'.join(str(x) for x in version[:parts])

    sub = ''
    if version[3] == 'post':
        # serial of 0/None falls back to the git changeset timestamp
        sub_v = version[4] or get_git_changeset()
        sub = '.post%s' % sub_v
    elif version[3] == 'alpha' and version[4] == 0:
        # pre-alpha: tag as a dev build of the current git changeset
        git_changeset = get_git_changeset()
        if git_changeset:
            sub = '.dev%s' % git_changeset
    elif version[3] != 'final':
        mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
        sub = mapping[version[3]] + str(version[4])

    return str(main + sub)
def get_git_changeset():
    """Returns a numeric identifier of the latest git changeset.

    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS
    format. This value isn't guaranteed to be unique, but collisions are
    very unlikely, so it's sufficient for generating development version
    numbers. Returns None when the timestamp cannot be determined.
    """
    # repository root: two levels above this file
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    proc = subprocess.Popen(
        'git log --pretty=format:%ct --quiet -1 HEAD',
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        shell=True, cwd=repo_dir, universal_newlines=True)
    raw_timestamp, _ = proc.communicate()
    try:
        changeset = datetime.datetime.utcfromtimestamp(int(raw_timestamp))
    except ValueError:
        # git missing, not a repository, or no commits yet
        return None
    return changeset.strftime('%Y%m%d%H%M%S')
| bsd-3-clause |
dominicelse/scipy | scipy/stats/_continuous_distns.py | 6 | 146462 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.misc.doccer import inherit_docstring_from
from scipy import optimize
from scipy import integrate
import scipy.special as sc
from scipy._lib._numpy_compat import broadcast_to
from . import _stats
from ._tukeylambda_stats import (tukeylambda_variance as _tlvar,
tukeylambda_kurtosis as _tlkurt)
from ._distn_infrastructure import (get_distribution_names, _kurtosis,
_lazyselect, _lazywhere, _ncx2_cdf,
_ncx2_log_pdf, _ncx2_pdf,
rv_continuous, _skew, valarray)
from ._constants import _XMIN, _EULER, _ZETA3, _XMAX, _LOGXMAX
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
    """General Kolmogorov-Smirnov one-sided test.
    %(default)s
    """
    def _cdf(self, x, n):
        # smirnov gives the survival function of the one-sided statistic
        sf = sc.smirnov(n, x)
        return 1.0 - sf

    def _ppf(self, q, n):
        # invert via the complementary probability
        p = 1.0 - q
        return sc.smirnovi(n, p)
ksone = ksone_gen(a=0.0, name='ksone')
class kstwobign_gen(rv_continuous):
    """Kolmogorov-Smirnov two-sided test for large N.

    %(default)s

    """
    def _sf(self, x):
        # Asymptotic (large-N) two-sided Kolmogorov-Smirnov survival
        # probability.
        return sc.kolmogorov(x)

    def _cdf(self, x):
        return 1.0 - sc.kolmogorov(x)

    def _ppf(self, q):
        # kolmogi inverts the survival function, so feed it 1 - q.
        return sc.kolmogi(1.0 - q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
# Normalizing constant of the standard normal pdf:
# pdf(x) = exp(-x**2/2) / _norm_pdf_C.
_norm_pdf_C = np.sqrt(2*np.pi)
_norm_pdf_logC = np.log(_norm_pdf_C)


def _norm_pdf(x):
    # Standard normal density.
    return np.exp(-x**2/2.0) / _norm_pdf_C


def _norm_logpdf(x):
    # Log of the standard normal density (avoids exp/log roundtrip).
    return -x**2 / 2.0 - _norm_pdf_logC


def _norm_cdf(x):
    # CDF via the C-implemented special function ndtr.
    return sc.ndtr(x)


def _norm_logcdf(x):
    # log(ndtr(x)) computed accurately for very negative x.
    return sc.log_ndtr(x)


def _norm_ppf(q):
    # Percent-point function (inverse CDF).
    return sc.ndtri(q)


def _norm_sf(x):
    # Survival function via the symmetry sf(x) = cdf(-x).
    return _norm_cdf(-x)


def _norm_logsf(x):
    return _norm_logcdf(-x)


def _norm_isf(q):
    # Inverse survival function via the symmetry isf(q) = -ppf(q).
    return -_norm_ppf(q)
class norm_gen(rv_continuous):
    """A normal continuous random variable.

    The location (loc) keyword specifies the mean.
    The scale (scale) keyword specifies the standard deviation.

    %(before_notes)s

    Notes
    -----
    The probability density function for `norm` is::

        norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)

    The survival function, ``norm.sf``, is also referred to as the
    Q-function in some contexts (see, e.g.,
    `Wikipedia's <https://en.wikipedia.org/wiki/Q-function>`_ definition).

    %(after_notes)s

    %(example)s

    """
    def _rvs(self):
        return self._random_state.standard_normal(self._size)

    # All of the following delegate to the module-level _norm_* helpers so
    # the same implementations can be reused by related distributions.
    def _pdf(self, x):
        return _norm_pdf(x)

    def _logpdf(self, x):
        return _norm_logpdf(x)

    def _cdf(self, x):
        return _norm_cdf(x)

    def _logcdf(self, x):
        return _norm_logcdf(x)

    def _sf(self, x):
        return _norm_sf(x)

    def _logsf(self, x):
        return _norm_logsf(x)

    def _ppf(self, q):
        return _norm_ppf(q)

    def _isf(self, q):
        return _norm_isf(q)

    def _stats(self):
        # mean, variance, skewness, excess kurtosis of the standard normal.
        return 0.0, 1.0, 0.0, 0.0

    def _entropy(self):
        return 0.5*(np.log(2*np.pi)+1)

    @inherit_docstring_from(rv_continuous)
    def fit(self, data, **kwds):
        """%(super)s
        This function (norm_gen.fit) uses explicit formulas for the maximum
        likelihood estimation of the parameters, so the `optimizer` argument
        is ignored.
        """
        floc = kwds.get('floc', None)
        fscale = kwds.get('fscale', None)

        if floc is not None and fscale is not None:
            # This check is for consistency with `rv_continuous.fit`.
            # Without this check, this function would just return the
            # parameters that were given.
            raise ValueError("All parameters fixed. There is nothing to "
                            "optimize.")

        data = np.asarray(data)

        if floc is None:
            # MLE of loc is the sample mean.
            loc = data.mean()
        else:
            loc = floc

        if fscale is None:
            # MLE of scale is the (biased) population standard deviation
            # about `loc`.
            scale = np.sqrt(((data - loc)**2).mean())
        else:
            scale = fscale

        return loc, scale
norm = norm_gen(name='norm')
class alpha_gen(rv_continuous):
    """An alpha continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `alpha` is::

        alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2),

    where ``Phi(alpha)`` is the normal CDF, ``x > 0``, and ``a > 0``.

    `alpha` takes ``a`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    # The pdf involves 1/x, so the support is kept open at x = 0.
    _support_mask = rv_continuous._open_support_mask

    def _pdf(self, x, a):
        # 1/(x**2 * Phi(a)) * phi(a - 1/x), phi/Phi being the normal pdf/cdf.
        return 1.0/(x**2)/_norm_cdf(a)*_norm_pdf(a-1.0/x)

    def _logpdf(self, x, a):
        return -2*np.log(x) + _norm_logpdf(a-1.0/x) - np.log(_norm_cdf(a))

    def _cdf(self, x, a):
        return _norm_cdf(a-1.0/x) / _norm_cdf(a)

    def _ppf(self, q, a):
        # Invert the cdf analytically through ndtri.
        return 1.0/np.asarray(a-sc.ndtri(q*_norm_cdf(a)))

    def _stats(self, a):
        # Mean and variance are infinite; skewness/kurtosis undefined.
        return [np.inf]*2 + [np.nan]*2
alpha = alpha_gen(a=0.0, name='alpha')
class anglit_gen(rv_continuous):
    """An anglit continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `anglit` is::

        anglit.pdf(x) = sin(2*x + pi/2) = cos(2*x),

    for ``-pi/4 <= x <= pi/4``.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # pdf(x) = sin(2*x + pi/2) = cos(2*x)
        return np.cos(2*x)

    def _cdf(self, x):
        shifted = x + np.pi/4
        return np.sin(shifted)**2.0

    def _ppf(self, q):
        return np.arcsin(np.sqrt(q)) - np.pi/4

    def _stats(self):
        mean = 0.0
        var = np.pi*np.pi/16 - 0.5
        skew = 0.0
        kurt = -2*(np.pi**4 - 96)/(np.pi*np.pi - 8)**2
        return mean, var, skew, kurt

    def _entropy(self):
        return 1 - np.log(2)
anglit = anglit_gen(a=-np.pi/4, b=np.pi/4, name='anglit')
class arcsine_gen(rv_continuous):
    """An arcsine continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `arcsine` is::

        arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))

    for ``0 < x < 1``.

    %(after_notes)s

    %(example)s

    """
    # The pdf diverges at both endpoints, so keep the support open.
    _support_mask = rv_continuous._open_support_mask

    def _pdf(self, x):
        return 1.0/np.pi/np.sqrt(x*(1-x))

    def _cdf(self, x):
        return (2.0/np.pi) * np.arcsin(np.sqrt(x))

    def _ppf(self, q):
        root = np.sin(0.5*np.pi*q)
        return root**2.0

    def _stats(self):
        # mean, variance, skewness, excess kurtosis -- all closed form.
        return 0.5, 0.125, 0.0, -1.5

    def _entropy(self):
        # Exact value: log(pi/4).
        return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
class FitDataError(ValueError):
    # Raised by, for example, beta_gen.fit when both floc and fscale are
    # fixed and some value in the data lies outside the open interval
    # (floc, floc + fscale).
    def __init__(self, distr, lower, upper):
        message = ("Invalid values in `data`. Maximum likelihood "
                   "estimation with {distr!r} requires that {lower!r} < x "
                   "< {upper!r} for each x in `data`.".format(
                       distr=distr, lower=lower, upper=upper))
        self.args = (message,)
class FitSolverError(RuntimeError):
    # Raised by, for example, beta_gen.fit when optimize.fsolve returns
    # with ier != 1 (i.e. the solver did not converge).
    def __init__(self, mesg):
        # Flatten the solver's multi-line message onto one line.
        flattened = mesg.replace('\n', '')
        self.args = ("Solver for the MLE equations failed to converge: "
                     + flattened,)
def _beta_mle_a(a, b, n, s1):
# The zeros of this function give the MLE for `a`, with
# `b`, `n` and `s1` given. `s1` is the sum of the logs of
# the data. `n` is the number of data points.
psiab = sc.psi(a + b)
func = s1 - n * (-psiab + sc.psi(a))
return func
def _beta_mle_ab(theta, n, s1, s2):
# Zeros of this function are critical points of
# the maximum likelihood function. Solving this system
# for theta (which contains a and b) gives the MLE for a and b
# given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data,
# and `s2` is the sum of the logs of 1 - data. `n` is the number
# of data points.
a, b = theta
psiab = sc.psi(a + b)
func = [s1 - n * (-psiab + sc.psi(a)),
s2 - n * (-psiab + sc.psi(b))]
return func
class beta_gen(rv_continuous):
    """A beta continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `beta` is::

                            gamma(a+b) * x**(a-1) * (1-x)**(b-1)
        beta.pdf(x, a, b) = ------------------------------------
                                     gamma(a)*gamma(b)

    for ``0 < x < 1``, ``a > 0``, ``b > 0``, where ``gamma(z)`` is the gamma
    function (`scipy.special.gamma`).

    `beta` takes ``a`` and ``b`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, a, b):
        return self._random_state.beta(a, b, self._size)

    def _pdf(self, x, a, b):
        return np.exp(self._logpdf(x, a, b))

    def _logpdf(self, x, a, b):
        # xlog1py/xlogy return 0 for a zero first argument, which keeps the
        # endpoints well-defined when a == 1 or b == 1.
        lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
        lPx -= sc.betaln(a, b)
        return lPx

    def _cdf(self, x, a, b):
        return sc.btdtr(a, b, x)

    def _ppf(self, q, a, b):
        return sc.btdtri(a, b, q)

    def _stats(self, a, b):
        # Closed-form mean, variance, skewness and excess kurtosis.
        mn = a*1.0 / (a + b)
        var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
        g1 = 2.0*(b-a)*np.sqrt((1.0+a+b)/(a*b)) / (2+a+b)
        g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
        g2 /= a*b*(a+b+2)*(a+b+3)
        return mn, var, g1, g2

    def _fitstart(self, data):
        # Initial shape guesses from matching sample skewness and kurtosis.
        g1 = _skew(data)
        g2 = _kurtosis(data)

        def func(x):
            a, b = x
            sk = 2*(b-a)*np.sqrt(a + b + 1) / (a + b + 2) / np.sqrt(a*b)
            ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
            ku /= a*b*(a+b+2)*(a+b+3)
            ku *= 6
            return [sk-g1, ku-g2]
        a, b = optimize.fsolve(func, (1.0, 1.0))
        return super(beta_gen, self)._fitstart(data, args=(a, b))

    @inherit_docstring_from(rv_continuous)
    def fit(self, data, *args, **kwds):
        """%(super)s
        In the special case where both `floc` and `fscale` are given, a
        `ValueError` is raised if any value `x` in `data` does not satisfy
        `floc < x < floc + fscale`.
        """
        # Override rv_continuous.fit, so we can more efficiently handle the
        # case where floc and fscale are given.

        # NOTE(review): `or`-chaining means a fixed shape value of 0 is
        # treated as "not given"; 0 is not a valid shape here, so this is
        # benign in practice.
        f0 = (kwds.get('f0', None) or kwds.get('fa', None) or
              kwds.get('fix_a', None))
        f1 = (kwds.get('f1', None) or kwds.get('fb', None) or
              kwds.get('fix_b', None))

        floc = kwds.get('floc', None)
        fscale = kwds.get('fscale', None)

        if floc is None or fscale is None:
            # do general fit
            return super(beta_gen, self).fit(data, *args, **kwds)

        if f0 is not None and f1 is not None:
            # This check is for consistency with `rv_continuous.fit`.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")

        # Special case: loc and scale are constrained, so we are fitting
        # just the shape parameters.  This can be done much more efficiently
        # than the method used in `rv_continuous.fit`.  (See the subsection
        # "Two unknown parameters" in the section "Maximum likelihood" of
        # the Wikipedia article on the Beta distribution for the formulas.)

        # Normalize the data to the interval [0, 1].
        data = (np.ravel(data) - floc) / fscale
        if np.any(data <= 0) or np.any(data >= 1):
            raise FitDataError("beta", lower=floc, upper=floc + fscale)
        xbar = data.mean()

        if f0 is not None or f1 is not None:
            # One of the shape parameters is fixed.

            if f0 is not None:
                # The shape parameter a is fixed, so swap the parameters
                # and flip the data.  We always solve for `a`.  The result
                # will be swapped back before returning.
                b = f0
                data = 1 - data
                xbar = 1 - xbar
            else:
                b = f1

            # Initial guess for a.  Use the formula for the mean of the beta
            # distribution, E[x] = a / (a + b), to generate a reasonable
            # starting point based on the mean of the data and the given
            # value of b.
            a = b * xbar / (1 - xbar)

            # Compute the MLE for `a` by solving _beta_mle_a.
            theta, info, ier, mesg = optimize.fsolve(
                _beta_mle_a, a,
                args=(b, len(data), np.log(data).sum()),
                full_output=True
            )
            if ier != 1:
                raise FitSolverError(mesg=mesg)
            a = theta[0]

            if f0 is not None:
                # The shape parameter a was fixed, so swap back the
                # parameters.
                a, b = b, a

        else:
            # Neither of the shape parameters is fixed.

            # s1 and s2 are used in the extra arguments passed to _beta_mle_ab
            # by optimize.fsolve.
            s1 = np.log(data).sum()
            s2 = sc.log1p(-data).sum()

            # Use the "method of moments" to estimate the initial
            # guess for a and b.
            fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
            a = xbar * fac
            b = (1 - xbar) * fac

            # Compute the MLE for a and b by solving _beta_mle_ab.
            theta, info, ier, mesg = optimize.fsolve(
                _beta_mle_ab, [a, b],
                args=(len(data), s1, s2),
                full_output=True
            )
            if ier != 1:
                raise FitSolverError(mesg=mesg)
            a, b = theta

        return a, b, floc, fscale
beta = beta_gen(a=0.0, b=1.0, name='beta')
class betaprime_gen(rv_continuous):
    """A beta prime continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `betaprime` is::

        betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)

    for ``x > 0``, ``a > 0``, ``b > 0``, where ``beta(a, b)`` is the beta
    function (see `scipy.special.beta`).

    `betaprime` takes ``a`` and ``b`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    _support_mask = rv_continuous._open_support_mask

    def _rvs(self, a, b):
        # Ratio of two independent gammas is beta-prime distributed.
        sz, rndm = self._size, self._random_state
        u1 = gamma.rvs(a, size=sz, random_state=rndm)
        u2 = gamma.rvs(b, size=sz, random_state=rndm)
        return u1 / u2

    def _pdf(self, x, a, b):
        return np.exp(self._logpdf(x, a, b))

    def _logpdf(self, x, a, b):
        return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b)

    def _cdf(self, x, a, b):
        # Map onto the beta CDF through the transform y = x / (1 + x).
        return sc.betainc(a, b, x/(1.+x))

    def _munp(self, n, a, b):
        # Raw moments exist only when b > n; return inf otherwise.
        # Only the first four moments are implemented in closed form.
        if n == 1.0:
            return np.where(b > 1,
                            a/(b-1.0),
                            np.inf)
        elif n == 2.0:
            return np.where(b > 2,
                            a*(a+1.0)/((b-2.0)*(b-1.0)),
                            np.inf)
        elif n == 3.0:
            return np.where(b > 3,
                            a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
                            np.inf)
        elif n == 4.0:
            return np.where(b > 4,
                            (a*(a + 1.0)*(a + 2.0)*(a + 3.0) /
                             ((b - 4.0)*(b - 3.0)*(b - 2.0)*(b - 1.0))),
                            np.inf)
        else:
            raise NotImplementedError
betaprime = betaprime_gen(a=0.0, name='betaprime')
class bradford_gen(rv_continuous):
    """A Bradford continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `bradford` is::

        bradford.pdf(x, c) = c / (k * (1+c*x)),

    for ``0 < x < 1``, ``c > 0`` and ``k = log(1+c)``.

    `bradford` takes ``c`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x, c):
        # c / ((1 + c*x) * log(1 + c)); log1p for accuracy at small c.
        return c / (c*x + 1.0) / sc.log1p(c)

    def _cdf(self, x, c):
        return sc.log1p(c*x) / sc.log1p(c)

    def _ppf(self, q, c):
        # Analytic inverse: ((1 + c)**q - 1) / c, via expm1 for accuracy.
        return sc.expm1(q * sc.log1p(c)) / c

    def _stats(self, c, moments='mv'):
        # Closed-form moments in terms of k = log(1 + c); skew/kurtosis are
        # only computed when requested, since they are comparatively costly.
        k = np.log(1.0+c)
        mu = (c-k)/(c*k)
        mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
        g1 = None
        g2 = None
        if 's' in moments:
            g1 = np.sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
            g1 /= np.sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
        if 'k' in moments:
            g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3)
                  + 6*c*k*k*(3*k-14) + 12*k**3)
            g2 /= 3*c*(c*(k-2)+2*k)**2
        return mu, mu2, g1, g2

    def _entropy(self, c):
        k = np.log(1+c)
        return k/2.0 - np.log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
class burr_gen(rv_continuous):
    """A Burr (Type III) continuous random variable.

    %(before_notes)s

    See Also
    --------
    fisk : a special case of either `burr` or ``burr12`` with ``d = 1``
    burr12 : Burr Type XII distribution

    Notes
    -----
    The probability density function for `burr` is::

        burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)

    for ``x > 0``.

    `burr` takes ``c`` and ``d`` as shape parameters.

    This is the PDF corresponding to the third CDF given in Burr's list;
    specifically, it is equation (11) in Burr's paper [1]_.

    %(after_notes)s

    References
    ----------
    .. [1] Burr, I. W. "Cumulative frequency functions", Annals of
       Mathematical Statistics, 13(2), pp 215-232 (1942).

    %(example)s

    """
    # pdf is singular at x = 0 for c > 1/(d+1) shapes; keep support open.
    _support_mask = rv_continuous._open_support_mask

    def _pdf(self, x, c, d):
        base = 1 + x**(-c)
        return c * d * (x**(-c - 1.0)) * base**(-d - 1.0)

    def _cdf(self, x, c, d):
        return (1 + x**(-c))**(-d)

    def _ppf(self, q, c, d):
        # Closed-form inverse of the CDF.
        return (q**(-1.0/d) - 1)**(-1.0/c)

    def _munp(self, n, c, d):
        ratio = 1. * n / c
        return d * sc.beta(1.0 - ratio, d + ratio)
burr = burr_gen(a=0.0, name='burr')
class burr12_gen(rv_continuous):
    """A Burr (Type XII) continuous random variable.

    %(before_notes)s

    See Also
    --------
    fisk : a special case of either `burr` or ``burr12`` with ``d = 1``
    burr : Burr Type III distribution

    Notes
    -----
    The probability density function for `burr12` is::

        burr12.pdf(x, c, d) = c * d * x**(c-1) * (1+x**(c))**(-d-1)

    for ``x > 0``.

    `burr12` takes ``c`` and ``d`` as shape parameters.

    This is the PDF corresponding to the twelfth CDF given in Burr's list;
    specifically, it is equation (20) in Burr's paper [1]_.

    %(after_notes)s

    The Burr type 12 distribution is also sometimes referred to as
    the Singh-Maddala distribution from NIST [2]_.

    References
    ----------
    .. [1] Burr, I. W. "Cumulative frequency functions", Annals of
       Mathematical Statistics, 13(2), pp 215-232 (1942).

    .. [2] http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/b12pdf.htm

    %(example)s

    """
    _support_mask = rv_continuous._open_support_mask

    def _pdf(self, x, c, d):
        return np.exp(self._logpdf(x, c, d))

    def _logpdf(self, x, c, d):
        return np.log(c) + np.log(d) + sc.xlogy(c - 1, x) + sc.xlog1py(-d-1, x**c)

    def _cdf(self, x, c, d):
        # cdf = 1 - sf; expm1 on the log-sf keeps accuracy for small x.
        return -sc.expm1(self._logsf(x, c, d))

    def _logcdf(self, x, c, d):
        return sc.log1p(-(1 + x**c)**(-d))

    def _sf(self, x, c, d):
        return np.exp(self._logsf(x, c, d))

    def _logsf(self, x, c, d):
        # log sf = -d * log(1 + x**c)
        return sc.xlog1py(-d, x**c)

    def _ppf(self, q, c, d):
        # The following is an implementation of
        #   ((1 - q)**(-1.0/d) - 1)**(1.0/c)
        # that does a better job handling small values of q.
        return sc.expm1(-1/d * sc.log1p(-q))**(1/c)

    def _munp(self, n, c, d):
        # Raw moments in terms of the beta function (finite for n < c*d).
        nc = 1. * n / c
        return d * sc.beta(1.0 + nc, d - nc)
burr12 = burr12_gen(a=0.0, name='burr12')
class fisk_gen(burr_gen):
    """A Fisk continuous random variable.

    The Fisk distribution is also known as the log-logistic distribution, and
    equals the Burr distribution with ``d == 1``.

    `fisk` takes ``c`` as a shape parameter.

    %(before_notes)s

    Notes
    -----
    The probability density function for `fisk` is::

        fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)

    for ``x > 0``.

    `fisk` takes ``c`` as a shape parameter.

    %(after_notes)s

    See Also
    --------
    burr

    %(example)s

    """
    # All methods delegate to burr_gen with the second shape fixed at d = 1.
    def _pdf(self, x, c):
        return burr_gen._pdf(self, x, c, 1.0)

    def _cdf(self, x, c):
        return burr_gen._cdf(self, x, c, 1.0)

    def _ppf(self, x, c):
        return burr_gen._ppf(self, x, c, 1.0)

    def _munp(self, n, c):
        return burr_gen._munp(self, n, c, 1.0)

    def _entropy(self, c):
        return 2 - np.log(c)
fisk = fisk_gen(a=0.0, name='fisk')
# median = loc
class cauchy_gen(rv_continuous):
    """A Cauchy continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `cauchy` is::

        cauchy.pdf(x) = 1 / (pi * (1 + x**2))

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        return 1.0/np.pi/(1.0+x*x)

    def _cdf(self, x):
        return 0.5 + np.arctan(x)/np.pi

    def _sf(self, x):
        return 0.5 - np.arctan(x)/np.pi

    def _ppf(self, q):
        return np.tan(np.pi*q-np.pi/2.0)

    def _isf(self, q):
        return np.tan(np.pi/2.0-np.pi*q)

    def _stats(self):
        # No moments exist for the Cauchy distribution.
        return (np.nan,) * 4

    def _entropy(self):
        return np.log(4*np.pi)

    def _fitstart(self, data, args=None):
        # Initialize ML guesses using quartiles instead of moments, since
        # the distribution has no finite mean or variance.
        q1, med, q3 = np.percentile(data, [25, 50, 75])
        return med, (q3 - q1)/2
cauchy = cauchy_gen(name='cauchy')
class chi_gen(rv_continuous):
    """A chi continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `chi` is::

        chi.pdf(x, df) = x**(df-1) * exp(-x**2/2) / (2**(df/2-1) * gamma(df/2))

    for ``x > 0``.

    Special cases of `chi` are:

        - ``chi(1, loc, scale)`` is equivalent to `halfnorm`
        - ``chi(2, 0, scale)`` is equivalent to `rayleigh`
        - ``chi(3, 0, scale)`` is equivalent to `maxwell`

    `chi` takes ``df`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, df):
        # A chi variate is the square root of a chi-squared variate.
        sz, rndm = self._size, self._random_state
        return np.sqrt(chi2.rvs(df, size=sz, random_state=rndm))

    def _pdf(self, x, df):
        return np.exp(self._logpdf(x, df))

    def _logpdf(self, x, df):
        # log normalizer: log(2) - (df/2)*log(2) - gammaln(df/2)
        #               = -(df/2 - 1)*log(2) - gammaln(df/2)
        l = np.log(2) - .5*np.log(2)*df - sc.gammaln(.5*df)
        return l + sc.xlogy(df - 1., x) - .5*x**2

    def _cdf(self, x, df):
        # P(X <= x) = regularized lower incomplete gamma of (df/2, x**2/2).
        return sc.gammainc(.5*df, .5*x**2)

    def _ppf(self, q, df):
        return np.sqrt(2*sc.gammaincinv(.5*df, q))

    def _stats(self, df):
        # Closed-form moments in terms of gamma-function ratios.
        mu = np.sqrt(2)*sc.gamma(df/2.0+0.5)/sc.gamma(df/2.0)
        mu2 = df - mu*mu
        g1 = (2*mu**3.0 + mu*(1-2*df))/np.asarray(np.power(mu2, 1.5))
        g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
        g2 /= np.asarray(mu2**2.0)
        return mu, mu2, g1, g2
chi = chi_gen(a=0.0, name='chi')
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
"""A chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi2` is::
chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
`chi2` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
return self._random_state.chisquare(df, self._size)
def _pdf(self, x, df):
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
return sc.xlogy(df/2.-1, x) - x/2. - sc.gammaln(df/2.) - (np.log(2)*df)/2.
def _cdf(self, x, df):
return sc.chdtr(df, x)
def _sf(self, x, df):
return sc.chdtrc(df, x)
def _isf(self, p, df):
return sc.chdtri(df, p)
def _ppf(self, p, df):
return self._isf(1.0-p, df)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*np.sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0, name='chi2')
class cosine_gen(rv_continuous):
    """A cosine continuous random variable.

    %(before_notes)s

    Notes
    -----
    The cosine distribution is an approximation to the normal distribution.
    The probability density function for `cosine` is::

        cosine.pdf(x) = 1/(2*pi) * (1+cos(x))

    for ``-pi <= x <= pi``.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        return (1 + np.cos(x)) * (1.0/2/np.pi)

    def _cdf(self, x):
        return (np.pi + x + np.sin(x)) * (1.0/2/np.pi)

    def _stats(self):
        var = np.pi*np.pi/3.0 - 2.0
        kurt = -6.0*(np.pi**4 - 90)/(5.0*(np.pi*np.pi - 6)**2)
        return 0.0, var, 0.0, kurt

    def _entropy(self):
        return np.log(4*np.pi) - 1.0
cosine = cosine_gen(a=-np.pi, b=np.pi, name='cosine')
class dgamma_gen(rv_continuous):
    """A double gamma continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `dgamma` is::

        dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))

    for ``a > 0``.

    `dgamma` takes ``a`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, a):
        # A gamma variate with a random sign gives a double gamma variate.
        sz, rndm = self._size, self._random_state
        u = rndm.random_sample(size=sz)
        gm = gamma.rvs(a, size=sz, random_state=rndm)
        return gm * np.where(u >= 0.5, 1, -1)

    def _pdf(self, x, a):
        ax = abs(x)
        return 1.0/(2*sc.gamma(a))*ax**(a-1.0) * np.exp(-ax)

    def _logpdf(self, x, a):
        ax = abs(x)
        return sc.xlogy(a - 1.0, ax) - ax - np.log(2) - sc.gammaln(a)

    def _cdf(self, x, a):
        # Symmetry about 0: fold the one-sided gamma CDF around 1/2.
        fac = 0.5*sc.gammainc(a, abs(x))
        return np.where(x > 0, 0.5 + fac, 0.5 - fac)

    def _sf(self, x, a):
        fac = 0.5*sc.gammainc(a, abs(x))
        return np.where(x > 0, 0.5-fac, 0.5+fac)

    def _ppf(self, q, a):
        # Invert via the complemented incomplete-gamma inverse; the sign is
        # recovered from which side of the median q lies on.
        fac = sc.gammainccinv(a, 1-abs(2*q-1))
        return np.where(q > 0.5, fac, -fac)

    def _stats(self, a):
        # Odd moments vanish by symmetry.
        mu2 = a*(a+1.0)
        return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma')
class dweibull_gen(rv_continuous):
    """A double Weibull continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `dweibull` is::

        dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)

    `dweibull` takes ``c`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, c):
        # A Weibull-min variate with a random sign.
        sz, rndm = self._size, self._random_state
        u = rndm.random_sample(size=sz)
        w = weibull_min.rvs(c, size=sz, random_state=rndm)
        return w * (np.where(u >= 0.5, 1, -1))

    def _pdf(self, x, c):
        ax = abs(x)
        Px = c / 2.0 * ax**(c-1.0) * np.exp(-ax**c)
        return Px

    def _logpdf(self, x, c):
        ax = abs(x)
        return np.log(c) - np.log(2.0) + sc.xlogy(c - 1.0, ax) - ax**c

    def _cdf(self, x, c):
        # Symmetry about 0: fold the Weibull survival function around 1/2.
        Cx1 = 0.5 * np.exp(-abs(x)**c)
        return np.where(x > 0, 1 - Cx1, Cx1)

    def _ppf(self, q, c):
        fac = 2. * np.where(q <= 0.5, q, 1. - q)
        fac = np.power(-np.log(fac), 1.0 / c)
        return np.where(q > 0.5, fac, -fac)

    def _munp(self, n, c):
        # Odd raw moments are zero by symmetry; even ones reduce to the
        # Weibull-min moments.
        return (1 - (n % 2)) * sc.gamma(1.0 + 1.0 * n / c)

    # since we know that all odd moments are zeros, return them at once.
    # returning Nones from _stats makes the public stats call _munp
    # so overall we're saving one or two gamma function evaluations here.
    def _stats(self, c):
        return 0, None, 0, None
dweibull = dweibull_gen(name='dweibull')
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
class expon_gen(rv_continuous):
    """An exponential continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `expon` is::

        expon.pdf(x) = exp(-x)

    for ``x >= 0``.

    %(after_notes)s

    A common parameterization for `expon` is in terms of the rate parameter
    ``lambda``, such that ``pdf = lambda * exp(-lambda * x)``.  This
    parameterization corresponds to using ``scale = 1 / lambda``.

    %(example)s

    """
    def _rvs(self):
        return self._random_state.standard_exponential(self._size)

    def _pdf(self, x):
        # pdf(x) = exp(-x)
        return np.exp(-x)

    def _logpdf(self, x):
        return -x

    def _cdf(self, x):
        # 1 - exp(-x), computed accurately near x = 0 via expm1.
        return -sc.expm1(-x)

    def _sf(self, x):
        return np.exp(-x)

    def _logsf(self, x):
        return -x

    def _ppf(self, p):
        # Inverse of the CDF, accurate for p near 0 via log1p.
        return -sc.log1p(-p)

    def _isf(self, p):
        return -np.log(p)

    def _stats(self):
        # mean, variance, skewness, excess kurtosis of Exp(1).
        return 1.0, 1.0, 2.0, 6.0

    def _entropy(self):
        return 1.0
expon = expon_gen(a=0.0, name='expon')
## Exponentially Modified Normal (exponential distribution
## convolved with a Normal).
## This is called an exponentially modified gaussian on wikipedia
class exponnorm_gen(rv_continuous):
    """An exponentially modified Normal continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `exponnorm` is::

        exponnorm.pdf(x, K) =
            1/(2*K) exp(1/(2 * K**2)) exp(-x / K) * erfc(-(x - 1/K) / sqrt(2))

    where the shape parameter ``K > 0``.

    It can be thought of as the sum of a normally distributed random
    value with mean ``loc`` and sigma ``scale`` and an exponentially
    distributed random number with a pdf proportional to ``exp(-lambda * x)``
    where ``lambda = (K * scale)**(-1)``.

    %(after_notes)s

    An alternative parameterization of this distribution (for example, in
    `Wikipedia <http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution>`_)
    involves three parameters, :math:`\\mu`, :math:`\\lambda` and :math:`\\sigma`.
    In the present parameterization this corresponds to having ``loc`` and
    ``scale`` equal to :math:`\\mu` and :math:`\\sigma`, respectively, and
    shape parameter :math:`K = 1/\\sigma\\lambda`.

    .. versionadded:: 0.16.0

    %(example)s

    """
    def _rvs(self, K):
        # Sum of a standard normal and an exponential with mean K.
        expval = self._random_state.standard_exponential(self._size) * K
        gval = self._random_state.standard_normal(self._size)
        return expval + gval

    def _pdf(self, x, K):
        invK = 1.0 / K
        exparg = 0.5 * invK**2 - invK * x
        # Avoid overflows; setting np.exp(exparg) to the max float works
        # all right here
        expval = _lazywhere(exparg < _LOGXMAX, (exparg,), np.exp, _XMAX)
        return 0.5 * invK * expval * sc.erfc(-(x - invK) / np.sqrt(2))

    def _logpdf(self, x, K):
        invK = 1.0 / K
        exparg = 0.5 * invK**2 - invK * x
        return exparg + np.log(0.5 * invK * sc.erfc(-(x - invK) / np.sqrt(2)))

    def _cdf(self, x, K):
        invK = 1.0 / K
        expval = invK * (0.5 * invK - x)
        return _norm_cdf(x) - np.exp(expval) * _norm_cdf(x - invK)

    def _sf(self, x, K):
        invK = 1.0 / K
        expval = invK * (0.5 * invK - x)
        return _norm_cdf(-x) + np.exp(expval) * _norm_cdf(x - invK)

    def _stats(self, K):
        # Closed-form moments; see the Wikipedia article referenced above.
        K2 = K * K
        opK2 = 1.0 + K2
        skw = 2 * K**3 * opK2**(-1.5)
        krt = 6.0 * K2 * K2 * opK2**(-2)
        return K, opK2, skw, krt
exponnorm = exponnorm_gen(name='exponnorm')
class exponweib_gen(rv_continuous):
"""An exponentiated Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponweib` is::
exponweib.pdf(x, a, c) =
a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
for ``x > 0``, ``a > 0``, ``c > 0``.
`exponweib` takes ``a`` and ``c`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, a, c):
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
negxc = -x**c
exm1c = -sc.expm1(negxc)
logp = (np.log(a) + np.log(c) + sc.xlogy(a - 1.0, exm1c) +
negxc + sc.xlogy(c - 1.0, x))
return logp
def _cdf(self, x, a, c):
exm1c = -sc.expm1(-x**c)
return exm1c**a
def _ppf(self, q, a, c):
return (-sc.log1p(-q**(1.0/a)))**np.asarray(1.0/c)
exponweib = exponweib_gen(a=0.0, name='exponweib')
class exponpow_gen(rv_continuous):
    """An exponential power continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `exponpow` is::

        exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))

    for ``x >= 0``, ``b > 0``.  Note that this is a different distribution
    from the exponential power distribution that is also known under the names
    "generalized normal" or "generalized Gaussian".

    `exponpow` takes ``b`` as a shape parameter.

    %(after_notes)s

    References
    ----------
    http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf

    %(example)s

    """
    def _pdf(self, x, b):
        return np.exp(self._logpdf(x, b))

    def _logpdf(self, x, b):
        xb = x**b
        return 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb)

    def _cdf(self, x, b):
        # 1 - exp(-(exp(x**b) - 1)); nested expm1 keeps accuracy near 0.
        return -sc.expm1(-sc.expm1(x**b))

    def _sf(self, x, b):
        return np.exp(-sc.expm1(x**b))

    def _isf(self, q, b):
        return (sc.log1p(-np.log(q)))**(1./b)

    def _ppf(self, p, b):
        return pow(sc.log1p(-sc.log1p(-p)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow')
class fatiguelife_gen(rv_continuous):
    """A fatigue-life (Birnbaum-Saunders) continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `fatiguelife` is::

        fatiguelife.pdf(x, c) =
            (x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))

    for ``x > 0``.

    `fatiguelife` takes ``c`` as a shape parameter.

    %(after_notes)s

    References
    ----------
    .. [1] "Birnbaum-Saunders distribution",
           http://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution

    %(example)s

    """
    _support_mask = rv_continuous._open_support_mask

    def _rvs(self, c):
        # Transform a standard normal z via the Birnbaum-Saunders relation
        # t = (c*z/2 + sqrt((c*z/2)**2 + 1))**2.
        z = self._random_state.standard_normal(self._size)
        x = 0.5*c*z
        x2 = x*x
        t = 1.0 + 2*x2 + 2*x*np.sqrt(1 + x2)
        return t

    def _pdf(self, x, c):
        return np.exp(self._logpdf(x, c))

    def _logpdf(self, x, c):
        return (np.log(x+1) - (x-1)**2 / (2.0*x*c**2) - np.log(2*c) -
                0.5*(np.log(2*np.pi) + 3*np.log(x)))

    def _cdf(self, x, c):
        # CDF via the normal CDF of (sqrt(x) - 1/sqrt(x)) / c.
        return _norm_cdf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))

    def _ppf(self, q, c):
        # Invert the normal transform analytically.
        tmp = c*sc.ndtri(q)
        return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2

    def _stats(self, c):
        # NB: the formula for kurtosis in wikipedia seems to have an error:
        # it's 40, not 41. At least it disagrees with the one from Wolfram
        # Alpha. And the latter one, below, passes the tests, while the wiki
        # one doesn't So far I didn't have the guts to actually check the
        # coefficients from the expressions for the raw moments.
        c2 = c*c
        mu = c2 / 2.0 + 1.0
        den = 5.0 * c2 + 4.0
        mu2 = c2*den / 4.0
        g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5)
        g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0
        return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
class foldcauchy_gen(rv_continuous):
    """A folded Cauchy continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `foldcauchy` is::

        foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))

    for ``x >= 0``.

    `foldcauchy` takes ``c`` as a shape parameter.

    %(example)s

    """
    def _rvs(self, c):
        # |Z| where Z is Cauchy with location c.
        return abs(cauchy.rvs(loc=c, size=self._size,
                              random_state=self._random_state))

    def _pdf(self, x, c):
        # Sum of the Cauchy densities at x-c and x+c (folding about 0).
        return 1.0/np.pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))

    def _cdf(self, x, c):
        return 1.0/np.pi*(np.arctan(x-c) + np.arctan(x+c))

    def _stats(self, c):
        # Like the Cauchy distribution, no finite moments exist.
        return np.inf, np.inf, np.nan, np.nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
class f_gen(rv_continuous):
    """An F continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `f` is::

                             df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
        F.pdf(x, df1, df2) = --------------------------------------------
                             (df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)

    for ``x > 0``.

    `f` takes ``dfn`` and ``dfd`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, dfn, dfd):
        return self._random_state.f(dfn, dfd, self._size)

    def _pdf(self, x, dfn, dfd):
        return np.exp(self._logpdf(x, dfn, dfd))

    def _logpdf(self, x, dfn, dfd):
        n = 1.0 * dfn
        m = 1.0 * dfd
        lPx = m/2 * np.log(m) + n/2 * np.log(n) + (n/2 - 1) * np.log(x)
        lPx -= ((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2)
        return lPx

    def _cdf(self, x, dfn, dfd):
        return sc.fdtr(dfn, dfd, x)

    def _sf(self, x, dfn, dfd):
        return sc.fdtrc(dfn, dfd, x)

    def _ppf(self, q, dfn, dfd):
        return sc.fdtri(dfn, dfd, q)

    def _stats(self, dfn, dfd):
        # Each moment only exists for a large enough denominator df; the
        # _lazywhere calls evaluate the closed form where it is valid and
        # substitute inf/nan elsewhere.
        v1, v2 = 1. * dfn, 1. * dfd
        v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8.

        # Mean requires dfd > 2.
        mu = _lazywhere(
            v2 > 2, (v2, v2_2),
            lambda v2, v2_2: v2 / v2_2,
            np.inf)

        # Variance requires dfd > 4.
        mu2 = _lazywhere(
            v2 > 4, (v1, v2, v2_2, v2_4),
            lambda v1, v2, v2_2, v2_4:
            2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4),
            np.inf)

        # Skewness requires dfd > 6.
        g1 = _lazywhere(
            v2 > 6, (v1, v2_2, v2_4, v2_6),
            lambda v1, v2_2, v2_4, v2_6:
            (2 * v1 + v2_2) / v2_6 * np.sqrt(v2_4 / (v1 * (v1 + v2_2))),
            np.nan)
        g1 *= np.sqrt(8.)

        # Excess kurtosis requires dfd > 8 (and builds on g1).
        g2 = _lazywhere(
            v2 > 8, (g1, v2_6, v2_8),
            lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8,
            np.nan)
        g2 *= 3. / 2.
        return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f')
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: regress docs have scale parameter correct, but first parameter
## he gives is a shape parameter A = c * scale
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
    """A folded normal continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `foldnorm` is::

        foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)

    for ``c >= 0``.

    `foldnorm` takes ``c`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, c):
        # c = 0 is allowed (reduces to the half-normal distribution).
        return c >= 0

    def _rvs(self, c):
        # |Z + c| for standard normal Z.
        return abs(self._random_state.standard_normal(self._size) + c)

    def _pdf(self, x, c):
        # Sum of the normal densities at x+c and x-c (folding about 0).
        return _norm_pdf(x + c) + _norm_pdf(x-c)

    def _cdf(self, x, c):
        return _norm_cdf(x-c) + _norm_cdf(x+c) - 1.0

    def _stats(self, c):
        # Regina C. Elandt, Technometrics 3, 551 (1961)
        # http://www.jstor.org/stable/1266561
        #
        c2 = c*c
        expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)

        mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
        mu2 = c2 + 1 - mu*mu

        g1 = 2. * (mu*mu*mu - c2*mu - expfac)
        g1 /= np.power(mu2, 1.5)

        g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
        g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
        g2 = g2 / mu2**2.0 - 3.

        return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
## Extreme Value Type II or Frechet
## (defined in Regress+ documentation as Extreme LB) as
## a limiting value distribution.
##
class frechet_r_gen(rv_continuous):
    """A Frechet right (or Weibull minimum) continuous random variable.
    %(before_notes)s
    See Also
    --------
    weibull_min : The same distribution as `frechet_r`.
    frechet_l, weibull_max
    Notes
    -----
    The probability density function for `frechet_r` is::
        frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c)
    for ``x > 0``, ``c > 0``.
    `frechet_r` takes ``c`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, c):
        # frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c)
        return c*pow(x, c-1)*np.exp(-pow(x, c))
    def _logpdf(self, x, c):
        # xlogy handles the c == 1, x == 0 corner without producing nan.
        return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c)
    def _cdf(self, x, c):
        # 1 - exp(-x**c), accurate for small x**c via expm1.
        return -sc.expm1(-pow(x, c))
    def _sf(self, x, c):
        return np.exp(-pow(x, c))
    def _logsf(self, x, c):
        return -pow(x, c)
    def _ppf(self, q, c):
        # Inverse of the cdf; log1p keeps precision for small q.
        return pow(-sc.log1p(-q), 1.0/c)
    def _munp(self, n, c):
        # n-th raw moment: Gamma(1 + n/c).
        return sc.gamma(1.0+n*1.0/c)
    def _entropy(self, c):
        return -_EULER / c - np.log(c) + _EULER + 1
frechet_r = frechet_r_gen(a=0.0, name='frechet_r')
weibull_min = frechet_r_gen(a=0.0, name='weibull_min')
class frechet_l_gen(rv_continuous):
    """A Frechet left (or Weibull maximum) continuous random variable.
    %(before_notes)s
    See Also
    --------
    weibull_max : The same distribution as `frechet_l`.
    frechet_r, weibull_min
    Notes
    -----
    The probability density function for `frechet_l` is::
        frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
    for ``x < 0``, ``c > 0``.
    `frechet_l` takes ``c`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    # Mirror image of frechet_r: X ~ frechet_l iff -X ~ frechet_r.
    def _pdf(self, x, c):
        return c*pow(-x, c-1)*np.exp(-pow(-x, c))
    def _logpdf(self, x, c):
        return np.log(c) + sc.xlogy(c-1, -x) - pow(-x, c)
    def _cdf(self, x, c):
        return np.exp(-pow(-x, c))
    def _logcdf(self, x, c):
        return -pow(-x, c)
    def _sf(self, x, c):
        # 1 - exp(-(-x)**c), accurate via expm1.
        return -sc.expm1(-pow(-x, c))
    def _ppf(self, q, c):
        return -pow(-np.log(q), 1.0/c)
    def _munp(self, n, c):
        # Raw moments of frechet_r with alternating sign from the
        # reflection: odd moments are negated.
        val = sc.gamma(1.0+n*1.0/c)
        if int(n) % 2:
            sgn = -1
        else:
            sgn = 1
        return sgn * val
    def _entropy(self, c):
        return -_EULER / c - np.log(c) + _EULER + 1
frechet_l = frechet_l_gen(b=0.0, name='frechet_l')
weibull_max = frechet_l_gen(b=0.0, name='weibull_max')
class genlogistic_gen(rv_continuous):
    """A generalized logistic continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `genlogistic` is::
        genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
    for real ``x`` and ``c > 0``.
    `genlogistic` takes ``c`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, c):
        return np.exp(self._logpdf(x, c))
    def _logpdf(self, x, c):
        # log1p(exp(-x)) is accurate for large positive x.
        return np.log(c) - x - (c+1.0)*sc.log1p(np.exp(-x))
    def _cdf(self, x, c):
        # Plain logistic cdf raised to the power c.
        Cx = (1+np.exp(-x))**(-c)
        return Cx
    def _ppf(self, q, c):
        # Inverse of the cdf above.
        vals = -np.log(pow(q, -1.0/c)-1)
        return vals
    def _stats(self, c):
        # Mean/variance/skew/kurtosis via polygamma (zeta) identities.
        mu = _EULER + sc.psi(c)
        mu2 = np.pi*np.pi/6.0 + sc.zeta(2, c)
        g1 = -2*sc.zeta(3, c) + 2*_ZETA3
        g1 /= np.power(mu2, 1.5)
        g2 = np.pi**4/15.0 + 6*sc.zeta(4, c)
        g2 /= mu2**2.0
        return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic')
class genpareto_gen(rv_continuous):
    """A generalized Pareto continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `genpareto` is::
        genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
    defined for ``x >= 0`` if ``c >=0``, and for
    ``0 <= x <= -1/c`` if ``c < 0``.
    `genpareto` takes ``c`` as a shape parameter.
    For ``c == 0``, `genpareto` reduces to the exponential
    distribution, `expon`::
        genpareto.pdf(x, c=0) = exp(-x)
    For ``c == -1``, `genpareto` is uniform on ``[0, 1]``::
        genpareto.cdf(x, c=-1) = x
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, c):
        # Support depends on the shape: upper endpoint is -1/c when
        # c < 0, +inf otherwise.  Stored on the instance as self.b.
        c = np.asarray(c)
        self.b = _lazywhere(c < 0, (c,),
                            lambda c: -1. / c,
                            np.inf)
        return True
    def _pdf(self, x, c):
        return np.exp(self._logpdf(x, c))
    def _logpdf(self, x, c):
        # The (x == x) term keeps nan inputs out of the lambda; the c == 0
        # limit is the exponential log-density -x.
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: -sc.xlog1py(c + 1., c*x) / c,
                          -x)
    def _cdf(self, x, c):
        # cdf = 1 - (1 + c*x)**(-1/c), written through the inverse
        # Box-Cox transform which handles the c -> 0 limit.
        return -sc.inv_boxcox1p(-x, -c)
    def _sf(self, x, c):
        return sc.inv_boxcox(-x, -c)
    def _logsf(self, x, c):
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: -sc.log1p(c*x) / c,
                          -x)
    def _ppf(self, q, c):
        # Inverse of _cdf via the forward Box-Cox transform.
        return -sc.boxcox1p(-q, -c)
    def _isf(self, q, c):
        return -sc.boxcox(q, -c)
    def _munp(self, n, c):
        # Raw moments: finite only while c*n < 1; the binomial-sum
        # closed form below diverges otherwise.
        def __munp(n, c):
            val = 0.0
            k = np.arange(0, n + 1)
            for ki, cnk in zip(k, sc.comb(n, k)):
                val = val + cnk * (-1) ** ki / (1.0 - c * ki)
            return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf)
        # c == 0 limit: exponential moments n!.
        return _lazywhere(c != 0, (c,),
                          lambda c: __munp(n, c),
                          sc.gamma(n + 1))
    def _entropy(self, c):
        return 1. + c
genpareto = genpareto_gen(a=0.0, name='genpareto')
class genexpon_gen(rv_continuous):
    """A generalized exponential continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `genexpon` is::
        genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
                                   exp(-a*x - b*x + b/c * (1-exp(-c*x)))
    for ``x >= 0``, ``a, b, c > 0``.
    `genexpon` takes ``a``, ``b`` and ``c`` as shape parameters.
    %(after_notes)s
    References
    ----------
    H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
    Distribution", Journal of the American Statistical Association, 1993.
    N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
    Applications", Asit P. Basu.
    %(example)s
    """
    # -sc.expm1(-c*x) computes (1 - exp(-c*x)) without cancellation.
    def _pdf(self, x, a, b, c):
        return (a + b*(-sc.expm1(-c*x)))*np.exp((-a-b)*x +
                                                b*(-sc.expm1(-c*x))/c)
    def _cdf(self, x, a, b, c):
        return -sc.expm1((-a-b)*x + b*(-sc.expm1(-c*x))/c)
    def _logpdf(self, x, a, b, c):
        return np.log(a+b*(-sc.expm1(-c*x))) + (-a-b)*x+b*(-sc.expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0, name='genexpon')
class genextreme_gen(rv_continuous):
    """A generalized extreme value continuous random variable.
    %(before_notes)s
    See Also
    --------
    gumbel_r
    Notes
    -----
    For ``c=0``, `genextreme` is equal to `gumbel_r`.
    The probability density function for `genextreme` is::
        genextreme.pdf(x, c) =
            exp(-exp(-x))*exp(-x),                    for c==0
            exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1),    for x <= 1/c, c > 0
    Note that several sources and software packages use the opposite
    convention for the sign of the shape parameter ``c``.
    `genextreme` takes ``c`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, c):
        # Support endpoints depend on the sign of c: upper bound 1/c for
        # c > 0, lower bound 1/c for c < 0.  _XMIN guards the division.
        self.b = np.where(c > 0, 1.0 / np.maximum(c, _XMIN), np.inf)
        self.a = np.where(c < 0, 1.0 / np.minimum(c, -_XMIN), -np.inf)
        return np.where(abs(c) == np.inf, 0, 1)
    def _loglogcdf(self, x, c):
        # log(-log(cdf)); the c == 0 limit is -x (Gumbel case).
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: sc.log1p(-c*x)/c, -x)
    def _pdf(self, x, c):
        return np.exp(self._logpdf(x, c))
    def _logpdf(self, x, c):
        cx = _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: c*x, 0.0)
        logex2 = sc.log1p(-cx)
        logpex2 = self._loglogcdf(x, c)
        pex2 = np.exp(logpex2)
        # Handle special cases
        np.putmask(logpex2, (c == 0) & (x == -np.inf), 0.0)
        # Density is zero at the finite support endpoint and at -inf.
        logpdf = np.where((cx == 1) | (cx == -np.inf),
                          -np.inf,
                          -pex2+logpex2-logex2)
        np.putmask(logpdf, (c == 1) & (x == 1), 0.0)
        return logpdf
    def _logcdf(self, x, c):
        return -np.exp(self._loglogcdf(x, c))
    def _cdf(self, x, c):
        return np.exp(self._logcdf(x, c))
    def _sf(self, x, c):
        return -sc.expm1(self._logcdf(x, c))
    def _ppf(self, q, c):
        # Invert via the Gumbel quantile, then map through c.
        x = -np.log(-np.log(q))
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: -sc.expm1(-c * x) / c, x)
    def _isf(self, q, c):
        x = -np.log(-sc.log1p(-q))
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: -sc.expm1(-c * x) / c, x)
    def _stats(self, c):
        # Moments in terms of g(n) = Gamma(n*c + 1); each moment exists
        # only for c above a threshold, nan/series limits elsewhere.
        g = lambda n: sc.gamma(n*c + 1)
        g1 = g(1)
        g2 = g(2)
        g3 = g(3)
        g4 = g(4)
        # Small-|c| values use series expansions to avoid cancellation.
        g2mg12 = np.where(abs(c) < 1e-7, (c*np.pi)**2.0/6.0, g2-g1**2.0)
        gam2k = np.where(abs(c) < 1e-7, np.pi**2.0/6.0,
                         sc.expm1(sc.gammaln(2.0*c+1.0)-2*sc.gammaln(c + 1.0))/c**2.0)
        eps = 1e-14
        gamk = np.where(abs(c) < eps, -_EULER, sc.expm1(sc.gammaln(c + 1))/c)
        m = np.where(c < -1.0, np.nan, -gamk)
        v = np.where(c < -0.5, np.nan, g1**2.0*gam2k)
        # skewness
        sk1 = np.where(c < -1./3, np.nan,
                       np.sign(c)*(-g3+(g2+2*g2mg12)*g1)/((g2mg12)**(3./2.)))
        sk = np.where(abs(c) <= eps**0.29, 12*np.sqrt(6)*_ZETA3/np.pi**3, sk1)
        # kurtosis
        ku1 = np.where(c < -1./4, np.nan,
                       (g4+(-4*g3+3*(g2+g2mg12)*g1)*g1)/((g2mg12)**2))
        ku = np.where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0)
        return m, v, sk, ku
    def _fitstart(self, data):
        # This is better than the default shape of (1,).
        # Sign of the shape guess follows the sample skewness.
        g = _skew(data)
        if g < 0:
            a = 0.5
        else:
            a = -0.5
        return super(genextreme_gen, self)._fitstart(data, args=(a,))
    def _munp(self, n, c):
        # Raw moments by binomial expansion; finite only for c*n > -1.
        k = np.arange(0, n+1)
        vals = 1.0/c**n * np.sum(
            sc.comb(n, k) * (-1)**k * sc.gamma(c*k + 1),
            axis=0)
        return np.where(c*n > -1, vals, np.inf)
    def _entropy(self, c):
        return _EULER*(1 - c) + 1
genextreme = genextreme_gen(name='genextreme')
def _digammainv(y):
    """Inverse of the digamma function (real positive arguments only).

    Solves ``sc.digamma(x) - y = 0`` for x > 0.  Used by `gamma_gen.fit`.
    """
    # The function uses either optimize.fsolve or optimize.newton
    # to solve `sc.digamma(x) - y = 0`. There is probably room for
    # improvement, but currently it works over a wide range of y:
    #    >>> y = 64*np.random.randn(1000000)
    #    >>> y.min(), y.max()
    #    (-311.43592651416662, 351.77388222276869)
    #    x = [_digammainv(t) for t in y]
    #    np.abs(sc.digamma(x) - y).max()
    #    1.1368683772161603e-13
    #
    _em = 0.5772156649015328606065120  # Euler-Mascheroni constant
    func = lambda x: sc.digamma(x) - y
    # Starting points below are empirical approximations of the inverse
    # in three regimes of y.
    if y > -0.125:
        x0 = np.exp(y) + 0.5
        if y < 10:
            # Some experimentation shows that newton reliably converges
            # much faster than fsolve in this y range. For larger y,
            # newton sometimes fails to converge.
            value = optimize.newton(func, x0, tol=1e-10)
            return value
    elif y > -3:
        x0 = np.exp(y/2.332) + 0.08661
    else:
        x0 = 1.0 / (-y - _em)
    value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
                                             full_output=True)
    if ier != 1:
        raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
    return value[0]
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
    """A gamma continuous random variable.
    %(before_notes)s
    See Also
    --------
    erlang, expon
    Notes
    -----
    The probability density function for `gamma` is::
        gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)
    for ``x >= 0``, ``a > 0``. Here ``gamma(a)`` refers to the gamma function.
    `gamma` has a shape parameter `a` which needs to be set explicitly.
    When ``a`` is an integer, `gamma` reduces to the Erlang
    distribution, and when ``a=1`` to the exponential distribution.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, a):
        return self._random_state.standard_gamma(a, self._size)
    def _pdf(self, x, a):
        return np.exp(self._logpdf(x, a))
    def _logpdf(self, x, a):
        # xlogy handles the a == 1, x == 0 corner without nan.
        return sc.xlogy(a-1.0, x) - x - sc.gammaln(a)
    def _cdf(self, x, a):
        return sc.gammainc(a, x)
    def _sf(self, x, a):
        return sc.gammaincc(a, x)
    def _ppf(self, q, a):
        return sc.gammaincinv(a, q)
    def _stats(self, a):
        return a, a, 2.0/np.sqrt(a), 6.0/a
    def _entropy(self, a):
        return sc.psi(a)*(1-a) + a + sc.gammaln(a)
    def _fitstart(self, data):
        # The skewness of the gamma distribution is `4 / np.sqrt(a)`.
        # We invert that to estimate the shape `a` using the skewness
        # of the data.  The formula is regularized with 1e-8 in the
        # denominator to allow for degenerate data where the skewness
        # is close to 0.
        a = 4 / (1e-8 + _skew(data)**2)
        return super(gamma_gen, self)._fitstart(data, args=(a,))
    @inherit_docstring_from(rv_continuous)
    def fit(self, data, *args, **kwds):
        # The shape parameter may be fixed under any of three keyword
        # spellings.
        f0 = (kwds.get('f0', None) or kwds.get('fa', None) or
              kwds.get('fix_a', None))
        floc = kwds.get('floc', None)
        fscale = kwds.get('fscale', None)
        if floc is None:
            # loc is not fixed.  Use the default fit method.
            return super(gamma_gen, self).fit(data, *args, **kwds)
        # Special case: loc is fixed.
        if f0 is not None and fscale is not None:
            # This check is for consistency with `rv_continuous.fit`.
            # Without this check, this function would just return the
            # parameters that were given.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")
        # Fixed location is handled by shifting the data.
        data = np.asarray(data)
        if np.any(data <= floc):
            raise FitDataError("gamma", lower=floc, upper=np.inf)
        if floc != 0:
            # Don't do the subtraction in-place, because `data` might be a
            # view of the input array.
            data = data - floc
        xbar = data.mean()
        # Three cases to handle:
        # * shape and scale both free
        # * shape fixed, scale free
        # * shape free, scale fixed
        if fscale is None:
            # scale is free
            if f0 is not None:
                # shape is fixed
                a = f0
            else:
                # shape and scale are both free.
                # The MLE for the shape parameter `a` is the solution to:
                # np.log(a) - sc.digamma(a) - np.log(xbar) +
                #                             np.log(data).mean() = 0
                s = np.log(xbar) - np.log(data).mean()
                func = lambda a: np.log(a) - sc.digamma(a) - s
                # Bracket the root around a standard closed-form
                # approximation of the MLE, then refine with brentq.
                aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
                xa = aest*(1-0.4)
                xb = aest*(1+0.4)
                a = optimize.brentq(func, xa, xb, disp=0)
            # The MLE for the scale parameter is just the data mean
            # divided by the shape parameter.
            scale = xbar / a
        else:
            # scale is fixed, shape is free
            # The MLE for the shape parameter `a` is the solution to:
            # sc.digamma(a) - np.log(data).mean() + np.log(fscale) = 0
            c = np.log(data).mean() - np.log(fscale)
            a = _digammainv(c)
            scale = fscale
        return a, floc, scale
gamma = gamma_gen(a=0.0, name='gamma')
class erlang_gen(gamma_gen):
    """An Erlang continuous random variable.
    %(before_notes)s
    See Also
    --------
    gamma
    Notes
    -----
    The Erlang distribution is a special case of the Gamma distribution, with
    the shape parameter `a` an integer.  Note that this restriction is not
    enforced by `erlang`. It will, however, generate a warning the first time
    a non-integer value is used for the shape parameter.
    Refer to `gamma` for examples.
    """
    def _argcheck(self, a):
        # Accept any positive shape but warn when it is not an integer,
        # since a non-integer shape is not a true Erlang distribution.
        allint = np.all(np.floor(a) == a)
        allpos = np.all(a > 0)
        if not allint:
            # An Erlang distribution shouldn't really have a non-integer
            # shape parameter, so warn the user.
            warnings.warn(
                'The shape parameter of the erlang distribution '
                'has been given a non-integer value %r.' % (a,),
                RuntimeWarning)
        return allpos
    def _fitstart(self, data):
        # Override gamma_gen_fitstart so that an integer initial value is
        # used. (Also regularize the division, to avoid issues when
        # _skew(data) is 0 or close to 0.)
        a = int(4.0 / (1e-8 + _skew(data)**2))
        return super(gamma_gen, self)._fitstart(data, args=(a,))
    # Trivial override of the fit method, so we can monkey-patch its
    # docstring.
    def fit(self, data, *args, **kwds):
        return super(erlang_gen, self).fit(data, *args, **kwds)
    # Append the Erlang-specific note to the inherited fit docstring
    # (skipped when running under `python -OO`, where docstrings are None).
    if fit.__doc__ is not None:
        fit.__doc__ = (rv_continuous.fit.__doc__ +
            """
            Notes
            -----
            The Erlang distribution is generally defined to have integer values
            for the shape parameter.  This is not enforced by the `erlang` class.
            When fitting the distribution, it will generally return a non-integer
            value for the shape parameter.  By using the keyword argument
            `f0=<integer>`, the fit method can be constrained to fit the data to
            a specific integer shape parameter.
            """)
erlang = erlang_gen(a=0.0, name='erlang')
class gengamma_gen(rv_continuous):
    """A generalized gamma continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `gengamma` is::
        gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a)
    for ``x >= 0``, ``a > 0``, and ``c != 0``.
    `gengamma` takes ``a`` and ``c`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, a, c):
        return (a > 0) & (c != 0)
    def _pdf(self, x, a, c):
        return np.exp(self._logpdf(x, a, c))
    def _logpdf(self, x, a, c):
        return np.log(abs(c)) + sc.xlogy(c*a - 1, x) - x**c - sc.gammaln(a)
    # For c < 0 the transform x -> x**c is decreasing, so the roles of
    # the cdf and sf (and of ppf and isf) swap; hence the np.where on
    # the sign of c in the four methods below.
    def _cdf(self, x, a, c):
        xc = x**c
        val1 = sc.gammainc(a, xc)
        val2 = sc.gammaincc(a, xc)
        return np.where(c > 0, val1, val2)
    def _sf(self, x, a, c):
        xc = x**c
        val1 = sc.gammainc(a, xc)
        val2 = sc.gammaincc(a, xc)
        return np.where(c > 0, val2, val1)
    def _ppf(self, q, a, c):
        val1 = sc.gammaincinv(a, q)
        val2 = sc.gammainccinv(a, q)
        return np.where(c > 0, val1, val2)**(1.0/c)
    def _isf(self, q, a, c):
        val1 = sc.gammaincinv(a, q)
        val2 = sc.gammainccinv(a, q)
        return np.where(c > 0, val2, val1)**(1.0/c)
    def _munp(self, n, a, c):
        # Pochhammer symbol: sc.poch(a, n) = gamma(a+n)/gamma(a)
        return sc.poch(a, n*1.0/c)
    def _entropy(self, a, c):
        val = sc.psi(a)
        return a*(1-val) + 1.0/c*val + sc.gammaln(a) - np.log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma')
class genhalflogistic_gen(rv_continuous):
    """A generalized half-logistic continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `genhalflogistic` is::
        genhalflogistic.pdf(x, c) =
            2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
    for ``0 <= x <= 1/c``, and ``c > 0``.
    `genhalflogistic` takes ``c`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, c):
        # The support is [0, 1/c]; record the upper endpoint on the
        # instance for the generic machinery.
        self.b = 1.0 / c
        return c > 0
    def _pdf(self, x, c):
        # 2 * (1-c*x)**(1/c-1) / (1 + (1-c*x)**(1/c))**2
        inv_c = 1.0/c
        base = np.asarray(1-c*x)
        pow_m1 = base**(inv_c-1)
        pow_full = pow_m1*base
        return 2*pow_m1 / (1+pow_full)**2
    def _cdf(self, x, c):
        inv_c = 1.0/c
        base = np.asarray(1-c*x)
        pow_full = base**(inv_c)
        return (1.0-pow_full) / (1+pow_full)
    def _ppf(self, q, c):
        # Inverse of the cdf above.
        return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
    def _entropy(self, c):
        # Differential entropy: 2 - (2c+1)*log(2).
        return 2 - (2*c+1)*np.log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
class gompertz_gen(rv_continuous):
    """A Gompertz (or truncated Gumbel) continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `gompertz` is::
        gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
    for ``x >= 0``, ``c > 0``.
    `gompertz` takes ``c`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, c):
        return np.exp(self._logpdf(x, c))
    def _logpdf(self, x, c):
        # expm1(x) = exp(x) - 1, accurate near x == 0.
        return np.log(c) + x - c * sc.expm1(x)
    def _cdf(self, x, c):
        return -sc.expm1(-c * sc.expm1(x))
    def _ppf(self, q, c):
        return sc.log1p(-1.0 / c * sc.log1p(-q))
    def _entropy(self, c):
        # expn(1, c) is the exponential integral E_1(c).
        return 1.0 - np.log(c) - np.exp(c)*sc.expn(1, c)
gompertz = gompertz_gen(a=0.0, name='gompertz')
class gumbel_r_gen(rv_continuous):
    """A right-skewed Gumbel continuous random variable.
    %(before_notes)s
    See Also
    --------
    gumbel_l, gompertz, genextreme
    Notes
    -----
    The probability density function for `gumbel_r` is::
        gumbel_r.pdf(x) = exp(-(x + exp(-x)))
    The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
    distribution.  It is also related to the extreme value distribution,
    log-Weibull and Gompertz distributions.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x):
        return np.exp(self._logpdf(x))
    def _logpdf(self, x):
        return -x - np.exp(-x)
    def _cdf(self, x):
        return np.exp(-np.exp(-x))
    def _logcdf(self, x):
        return -np.exp(-x)
    def _ppf(self, q):
        return -np.log(-np.log(q))
    def _stats(self):
        # mean, variance, skewness, excess kurtosis (all closed form).
        return _EULER, np.pi*np.pi/6.0, 12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
    def _entropy(self):
        # http://en.wikipedia.org/wiki/Gumbel_distribution
        return _EULER + 1.
gumbel_r = gumbel_r_gen(name='gumbel_r')
class gumbel_l_gen(rv_continuous):
    """A left-skewed Gumbel continuous random variable.
    %(before_notes)s
    See Also
    --------
    gumbel_r, gompertz, genextreme
    Notes
    -----
    The probability density function for `gumbel_l` is::
        gumbel_l.pdf(x) = exp(x - exp(x))
    The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
    distribution.  It is also related to the extreme value distribution,
    log-Weibull and Gompertz distributions.
    %(after_notes)s
    %(example)s
    """
    # Mirror image of gumbel_r: X ~ gumbel_l iff -X ~ gumbel_r.
    def _pdf(self, x):
        return np.exp(self._logpdf(x))
    def _logpdf(self, x):
        return x - np.exp(x)
    def _cdf(self, x):
        return -sc.expm1(-np.exp(x))
    def _ppf(self, q):
        return np.log(-sc.log1p(-q))
    def _logsf(self, x):
        return -np.exp(x)
    def _sf(self, x):
        return np.exp(-np.exp(x))
    def _isf(self, x):
        # note: the argument is a probability despite being named `x`.
        return np.log(-np.log(x))
    def _stats(self):
        return -_EULER, np.pi*np.pi/6.0, \
            -12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
    def _entropy(self):
        return _EULER + 1.
gumbel_l = gumbel_l_gen(name='gumbel_l')
class halfcauchy_gen(rv_continuous):
    """A Half-Cauchy continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `halfcauchy` is::
        halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
    for ``x >= 0``.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x):
        return 2.0/np.pi/(1.0+x*x)
    def _logpdf(self, x):
        # log1p(x*x) is accurate for small x.
        return np.log(2.0/np.pi) - sc.log1p(x*x)
    def _cdf(self, x):
        return 2.0/np.pi*np.arctan(x)
    def _ppf(self, q):
        return np.tan(np.pi/2*q)
    def _stats(self):
        # Mean and variance do not exist (heavy tails).
        return np.inf, np.inf, np.nan, np.nan
    def _entropy(self):
        return np.log(2*np.pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
class halflogistic_gen(rv_continuous):
    """A half-logistic continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `halflogistic` is::
        halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2 = 1/2 * sech(x/2)**2
    for ``x >= 0``.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x):
        return np.exp(self._logpdf(x))
    def _logpdf(self, x):
        return np.log(2) - x - 2. * sc.log1p(np.exp(-x))
    def _cdf(self, x):
        return np.tanh(x/2.0)
    def _ppf(self, q):
        return 2*np.arctanh(q)
    def _munp(self, n):
        # Closed forms for the first four raw moments; general case via
        # the zeta function.
        if n == 1:
            return 2*np.log(2)
        if n == 2:
            return np.pi*np.pi/3.0
        if n == 3:
            return 9*_ZETA3
        if n == 4:
            return 7*np.pi**4 / 15.0
        return 2*(1-pow(2.0, 1-n))*sc.gamma(n+1)*sc.zeta(n, 1)
    def _entropy(self):
        return 2-np.log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
class halfnorm_gen(rv_continuous):
    """A half-normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `halfnorm` is::
        halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
    for ``x > 0``.
    `halfnorm` is a special case of `chi` with ``df == 1``.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self):
        # |Z| with Z standard normal.
        return abs(self._random_state.standard_normal(size=self._size))
    def _pdf(self, x):
        return np.sqrt(2.0/np.pi)*np.exp(-x*x/2.0)
    def _logpdf(self, x):
        return 0.5 * np.log(2.0/np.pi) - x*x/2.0
    def _cdf(self, x):
        return _norm_cdf(x)*2-1.0
    def _ppf(self, q):
        return sc.ndtri((1+q)/2.0)
    def _stats(self):
        # mean, variance, skewness, excess kurtosis (closed form).
        return (np.sqrt(2.0/np.pi), 1-2.0/np.pi, np.sqrt(2)*(4-np.pi)/(np.pi-2)**1.5,
                8*(np.pi-3)/(np.pi-2)**2)
    def _entropy(self):
        return 0.5*np.log(np.pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
class hypsecant_gen(rv_continuous):
    """A hyperbolic secant continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `hypsecant` is::
        hypsecant.pdf(x) = 1/pi * sech(x)
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x):
        return 1.0/(np.pi*np.cosh(x))
    def _cdf(self, x):
        # The Gudermannian-style closed form of the cdf.
        return 2.0/np.pi*np.arctan(np.exp(x))
    def _ppf(self, q):
        return np.log(np.tan(np.pi*q/2.0))
    def _stats(self):
        return 0, np.pi*np.pi/4, 0, 2
    def _entropy(self):
        return np.log(2*np.pi)
hypsecant = hypsecant_gen(name='hypsecant')
class gausshyper_gen(rv_continuous):
    """A Gauss hypergeometric continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `gausshyper` is::
        gausshyper.pdf(x, a, b, c, z) =
            C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
    for ``0 <= x <= 1``, ``a > 0``, ``b > 0``, and
    ``C = 1 / (B(a, b) F[2, 1](c, a; a+b; -z))``
    `gausshyper` takes ``a``, ``b``, ``c`` and ``z`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, a, b, c, z):
        # The (v == v) comparisons reject nan values of c and z.
        return (a > 0) & (b > 0) & (c == c) & (z == z)
    def _pdf(self, x, a, b, c, z):
        # Cinv is the normalizing constant B(a,b) * 2F1(c, a; a+b; -z).
        Cinv = sc.gamma(a)*sc.gamma(b)/sc.gamma(a+b)*sc.hyp2f1(c, a, a+b, -z)
        return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
    def _munp(self, n, a, b, c, z):
        # Raw moments as a ratio of hypergeometric values.
        fac = sc.beta(n+a, b) / sc.beta(a, b)
        num = sc.hyp2f1(c, a+n, a+b+n, -z)
        den = sc.hyp2f1(c, a, a+b, -z)
        return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
class invgamma_gen(rv_continuous):
    """An inverted gamma continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `invgamma` is::
        invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
    for x > 0, a > 0.
    `invgamma` takes ``a`` as a shape parameter.
    `invgamma` is a special case of `gengamma` with ``c == -1``.
    %(after_notes)s
    %(example)s
    """
    # The density is not defined at x == 0, so use the open support mask.
    _support_mask = rv_continuous._open_support_mask
    def _pdf(self, x, a):
        return np.exp(self._logpdf(x, a))
    def _logpdf(self, x, a):
        return -(a+1) * np.log(x) - sc.gammaln(a) - 1.0/x
    # If X ~ invgamma(a) then 1/X ~ gamma(a), which swaps cdf/sf
    # (x -> 1/x is decreasing), hence the gammainc/gammaincc pairing.
    def _cdf(self, x, a):
        return sc.gammaincc(a, 1.0 / x)
    def _ppf(self, q, a):
        return 1.0 / sc.gammainccinv(a, q)
    def _sf(self, x, a):
        return sc.gammainc(a, 1.0 / x)
    def _isf(self, q, a):
        return 1.0 / sc.gammaincinv(a, q)
    def _stats(self, a, moments='mvsk'):
        # Moments exist only for sufficiently large a (1, 2, 3, 4 for
        # mean, variance, skewness, kurtosis respectively).
        m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf)
        m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.),
                        np.inf)
        g1, g2 = None, None
        if 's' in moments:
            g1 = _lazywhere(
                a > 3, (a,),
                lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan)
        if 'k' in moments:
            g2 = _lazywhere(
                a > 4, (a,),
                lambda x: 6. * (5. * x - 11.) / (x - 3.) / (x - 4.), np.nan)
        return m1, m2, g1, g2
    def _entropy(self, a):
        return a - (a+1.0) * sc.psi(a) + sc.gammaln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma')
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
    """An inverse Gaussian continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `invgauss` is::
        invgauss.pdf(x, mu) = 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
    for ``x > 0``.
    `invgauss` takes ``mu`` as a shape parameter.
    %(after_notes)s
    When `mu` is too small, evaluating the cumulative distribution function will be
    inaccurate due to ``cdf(mu -> 0) = inf * 0``.
    NaNs are returned for ``mu <= 0.0028``.
    %(example)s
    """
    # The density is not defined at x == 0, so use the open support mask.
    _support_mask = rv_continuous._open_support_mask
    def _rvs(self, mu):
        # numpy's Wald sampler is the inverse Gaussian with lambda = 1.
        return self._random_state.wald(mu, 1.0, size=self._size)
    def _pdf(self, x, mu):
        return 1.0/np.sqrt(2*np.pi*x**3.0)*np.exp(-1.0/(2*x)*((x-mu)/mu)**2)
    def _logpdf(self, x, mu):
        return -0.5*np.log(2*np.pi) - 1.5*np.log(x) - ((x-mu)/mu)**2/(2*x)
    def _cdf(self, x, mu):
        fac = np.sqrt(1.0/x)
        # Numerical accuracy for small `mu` is bad.  See #869.
        # The two exp(1/mu) factors multiply to the exp(2/mu) of the
        # standard closed-form cdf.
        C1 = _norm_cdf(fac*(x-mu)/mu)
        C1 += np.exp(1.0/mu) * _norm_cdf(-fac*(x+mu)/mu) * np.exp(1.0/mu)
        return C1
    def _stats(self, mu):
        return mu, mu**3.0, 3*np.sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss')
class invweibull_gen(rv_continuous):
    """An inverted Weibull continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `invweibull` is::
        invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
    for ``x > 0``, ``c > 0``.
    `invweibull` takes ``c`` as a shape parameter.
    %(after_notes)s
    References
    ----------
    F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
    Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.
    %(example)s
    """
    # The density is not defined at x == 0, so use the open support mask.
    _support_mask = rv_continuous._open_support_mask
    def _pdf(self, x, c):
        # invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
        poly_part = np.power(x, -c - 1.0)
        exp_arg = np.power(x, -c)
        exp_part = np.exp(-exp_arg)
        return c * poly_part * exp_part
    def _cdf(self, x, c):
        # cdf = exp(-x**(-c))
        exp_arg = np.power(x, -c)
        return np.exp(-exp_arg)
    def _ppf(self, q, c):
        # Inverse of the cdf above.
        neg_log_q = -np.log(q)
        return np.power(neg_log_q, -1.0/c)
    def _munp(self, n, c):
        # n-th raw moment: Gamma(1 - n/c); infinite for n >= c.
        return sc.gamma(1 - n / c)
    def _entropy(self, c):
        return 1 + _EULER + _EULER / c - np.log(c)
invweibull = invweibull_gen(a=0, name='invweibull')
class johnsonsb_gen(rv_continuous):
    """A Johnson SB continuous random variable.
    %(before_notes)s
    See Also
    --------
    johnsonsu
    Notes
    -----
    The probability density function for `johnsonsb` is::
        johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
    for ``0 < x < 1`` and ``a, b > 0``, and ``phi`` is the normal pdf.
    `johnsonsb` takes ``a`` and ``b`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    # Endpoints 0 and 1 are excluded from the support.
    _support_mask = rv_continuous._open_support_mask
    def _argcheck(self, a, b):
        # b must be positive; (a == a) rejects nan values of a.
        return (b > 0) & (a == a)
    def _pdf(self, x, a, b):
        # b / (x*(1-x)) * phi(a + b * logit(x))
        logit_x = np.log(x/(1.0-x))
        phi_term = _norm_pdf(a + b*logit_x)
        return b*1.0/(x*(1-x))*phi_term
    def _cdf(self, x, a, b):
        return _norm_cdf(a + b*np.log(x/(1.0-x)))
    def _ppf(self, q, a, b):
        # Inverse of the cdf: logistic transform of the normal quantile.
        z = _norm_ppf(q)
        return 1.0 / (1 + np.exp(-1.0 / b * (z - a)))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
class johnsonsu_gen(rv_continuous):
    """A Johnson SU continuous random variable.
    %(before_notes)s
    See Also
    --------
    johnsonsb
    Notes
    -----
    The probability density function for `johnsonsu` is::
        johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
                                 phi(a + b * log(x + sqrt(x**2 + 1)))
    for all ``x, a, b > 0``, and `phi` is the normal pdf.
    `johnsonsu` takes ``a`` and ``b`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, a, b):
        # b must be positive; (a == a) rejects nan values of a.
        return (b > 0) & (a == a)
    def _pdf(self, x, a, b):
        # b / sqrt(x**2+1) * phi(a + b * asinh(x))
        xsq = x*x
        root = np.sqrt(xsq+1.0)
        phi_term = _norm_pdf(a + b * np.log(x + root))
        return b*1.0/root*phi_term
    def _cdf(self, x, a, b):
        asinh_x = np.log(x + np.sqrt(x*x + 1))
        return _norm_cdf(a + b * asinh_x)
    def _ppf(self, q, a, b):
        # Inverse of the cdf: sinh of the shifted normal quantile.
        z = _norm_ppf(q)
        return np.sinh((z - a) / b)
johnsonsu = johnsonsu_gen(name='johnsonsu')
class laplace_gen(rv_continuous):
    """A Laplace continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `laplace` is::
        laplace.pdf(x) = 1/2 * exp(-abs(x))
    %(after_notes)s
    %(example)s
    """
    def _rvs(self):
        return self._random_state.laplace(0, 1, size=self._size)
    def _pdf(self, x):
        return 0.5*np.exp(-abs(x))
    def _cdf(self, x):
        # Piecewise closed form, split at the median x == 0.
        return np.where(x > 0, 1.0-0.5*np.exp(-x), 0.5*np.exp(x))
    def _ppf(self, q):
        # Piecewise inverse, split at q == 0.5.
        return np.where(q > 0.5, -np.log(2*(1-q)), np.log(2*q))
    def _stats(self):
        return 0, 2, 0, 3
    def _entropy(self):
        return np.log(2)+1
laplace = laplace_gen(name='laplace')
class levy_gen(rv_continuous):
    """A Levy continuous random variable.
    %(before_notes)s
    See Also
    --------
    levy_stable, levy_l
    Notes
    -----
    The probability density function for `levy` is::
        levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
    for ``x > 0``.
    This is the same as the Levy-stable distribution with a=1/2 and b=1.
    %(after_notes)s
    %(example)s
    """
    # The density is not defined at x == 0, so use the open support mask.
    _support_mask = rv_continuous._open_support_mask
    def _pdf(self, x):
        # levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
        norm_term = 1 / np.sqrt(2*np.pi*x) / x
        return norm_term * np.exp(-1/(2*x))
    def _cdf(self, x):
        # Equivalent to 2*norm.sf(np.sqrt(1/x))
        return sc.erfc(np.sqrt(0.5 / x))
    def _ppf(self, q):
        # Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2)
        z = -sc.ndtri(q/2)
        return 1.0 / (z * z)
    def _stats(self):
        # Mean and variance do not exist (very heavy right tail).
        return np.inf, np.inf, np.nan, np.nan
levy = levy_gen(a=0.0, name="levy")
class levy_l_gen(rv_continuous):
    """A left-skewed Levy continuous random variable.
    %(before_notes)s
    See Also
    --------
    levy, levy_stable
    Notes
    -----
    The probability density function for `levy_l` is::
        levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
    for ``x < 0``.
    This is the same as the Levy-stable distribution with a=1/2 and b=-1.
    %(after_notes)s
    %(example)s
    """
    # The density is not defined at x == 0, so use the open support mask.
    _support_mask = rv_continuous._open_support_mask
    def _pdf(self, x):
        # Mirror image of levy: density of |x| under levy.
        abs_x = abs(x)
        return 1/np.sqrt(2*np.pi*abs_x)/abs_x*np.exp(-1/(2*abs_x))
    def _cdf(self, x):
        abs_x = abs(x)
        return 2 * _norm_cdf(1 / np.sqrt(abs_x)) - 1
    def _ppf(self, q):
        z = _norm_ppf((q + 1.0) / 2)
        return -1.0 / (z * z)
    def _stats(self):
        # Mean and variance do not exist (very heavy left tail).
        return np.inf, np.inf, np.nan, np.nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
class levy_stable_gen(rv_continuous):
    """A Levy-stable continuous random variable.
    %(before_notes)s
    See Also
    --------
    levy, levy_l
    Notes
    -----
    Levy-stable distribution (only random variates available -- ignore other
    docs)
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, alpha, beta):
        # Variates are built from a uniform angle TH on (-pi/2, pi/2)
        # and an independent standard exponential W, with separate
        # formulas for the alpha == 1 and beta == 0 special cases.
        # NOTE(review): this looks like a Chambers-Mallows-Stuck-type
        # construction -- confirm against a reference before relying on
        # that name.
        def alpha1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
            return (2/np.pi*(np.pi/2 + bTH)*tanTH -
                    beta*np.log((np.pi/2*W*cosTH)/(np.pi/2 + bTH)))
        def beta0func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
            return (W/(cosTH/np.tan(aTH) + np.sin(TH)) *
                    ((np.cos(aTH) + np.sin(aTH)*tanTH)/W)**(1.0/alpha))
        def otherwise(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
            # alpha is not 1 and beta is not 0
            val0 = beta*np.tan(np.pi*alpha/2)
            th0 = np.arctan(val0)/alpha
            val3 = W/(cosTH/np.tan(alpha*(th0 + TH)) + np.sin(TH))
            res3 = val3*((np.cos(aTH) + np.sin(aTH)*tanTH -
                          val0*(np.sin(aTH) - np.cos(aTH)*tanTH))/W)**(1.0/alpha)
            return res3
        def alphanot1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
            res = _lazywhere(beta == 0,
                             (alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
                             beta0func, f2=otherwise)
            return res
        sz = self._size
        alpha = broadcast_to(alpha, sz)
        beta = broadcast_to(beta, sz)
        TH = uniform.rvs(loc=-np.pi/2.0, scale=np.pi, size=sz,
                         random_state=self._random_state)
        W = expon.rvs(size=sz, random_state=self._random_state)
        # Precompute the shared trigonometric terms.
        aTH = alpha*TH
        bTH = beta*TH
        cosTH = np.cos(TH)
        tanTH = np.tan(TH)
        res = _lazywhere(alpha == 1, (alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
                         alpha1func, f2=alphanot1func)
        return res
    def _argcheck(self, alpha, beta):
        # Stability index alpha in (0, 2], skewness beta in [-1, 1].
        return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
    def _pdf(self, x, alpha, beta):
        # Density has no general closed form; only sampling is supported.
        raise NotImplementedError
levy_stable = levy_stable_gen(name='levy_stable')
class logistic_gen(rv_continuous):
    """A logistic (or Sech-squared) continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `logistic` is::
        logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
    `logistic` is a special case of `genlogistic` with ``c == 1``.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self):
        # Delegate sampling to numpy's logistic generator.
        return self._random_state.logistic(size=self._size)
    def _pdf(self, x):
        # Evaluate through the log-pdf for numerical stability.
        return np.exp(self._logpdf(x))
    def _logpdf(self, x):
        # log pdf = -x - 2*log(1 + exp(-x)); log1p keeps accuracy for small
        # exp(-x).
        return -x - 2. * sc.log1p(np.exp(-x))
    def _cdf(self, x):
        # The cdf is the sigmoid (expit) function 1/(1 + exp(-x)).
        return sc.expit(x)
    def _ppf(self, q):
        # logit is the exact inverse of expit.
        return sc.logit(q)
    def _sf(self, x):
        # Symmetry about 0: sf(x) = cdf(-x).
        return sc.expit(-x)
    def _isf(self, q):
        return -sc.logit(q)
    def _stats(self):
        # mean 0, variance pi**2/3, skewness 0, excess kurtosis 6/5.
        return 0, np.pi*np.pi/3.0, 0, 6.0/5.0
    def _entropy(self):
        # http://en.wikipedia.org/wiki/Logistic_distribution
        return 2.0
logistic = logistic_gen(name='logistic')
class loggamma_gen(rv_continuous):
    """A log gamma continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `loggamma` is::
        loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
    for all ``x, c > 0``.
    `loggamma` takes ``c`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, c):
        # If G ~ Gamma(c), then log(G) follows this distribution.
        return np.log(self._random_state.gamma(c, size=self._size))
    def _pdf(self, x, c):
        # pdf computed in log space (gammaln) then exponentiated.
        return np.exp(c*x-np.exp(x)-sc.gammaln(c))
    def _cdf(self, x, c):
        # Regularized lower incomplete gamma evaluated at exp(x).
        return sc.gammainc(c, np.exp(x))
    def _ppf(self, q, c):
        # Invert the cdf: log of the incomplete-gamma inverse.
        return np.log(sc.gammaincinv(c, q))
    def _stats(self, c):
        # See, for example, "A Statistical Study of Log-Gamma Distribution", by
        # Ping Shing Chan (thesis, McMaster University, 1993).
        mean = sc.digamma(c)
        var = sc.polygamma(1, c)
        skewness = sc.polygamma(2, c) / np.power(var, 1.5)
        excess_kurtosis = sc.polygamma(3, c) / (var*var)
        return mean, var, skewness, excess_kurtosis
loggamma = loggamma_gen(name='loggamma')
class loglaplace_gen(rv_continuous):
    """A log-Laplace continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `loglaplace` is::
        loglaplace.pdf(x, c) = c / 2 * x**(c-1),   for 0 < x < 1
                             = c / 2 * x**(-c-1),  for x >= 1
    for ``c > 0``.
    `loglaplace` takes ``c`` as a shape parameter.
    %(after_notes)s
    References
    ----------
    T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
    The Mathematical Scientist, vol. 28, pp. 49-60, 2003.
    %(example)s
    """
    def _pdf(self, x, c):
        # The two pdf branches differ only in the sign of the exponent's c.
        half_c = c/2.0
        exponent = np.where(x < 1, c, -c) - 1
        return half_c*x**exponent
    def _cdf(self, x, c):
        # Piecewise cdf: 0.5*x**c below 1, 1 - 0.5*x**(-c) above.
        below = 0.5*x**c
        above = 1-0.5*x**(-c)
        return np.where(x < 1, below, above)
    def _ppf(self, q, c):
        # Invert each cdf branch at the median split q = 0.5.
        lower = (2.0*q)**(1.0/c)
        upper = (2*(1.0-q))**(-1.0/c)
        return np.where(q < 0.5, lower, upper)
    def _munp(self, n, c):
        # Raw moments exist for n < c: E[X**n] = c**2 / (c**2 - n**2).
        csq = c**2
        return csq / (csq - n**2)
    def _entropy(self, c):
        return np.log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
def _lognorm_logpdf(x, s):
    # Shared log-pdf for lognorm (shape s) and gilbrat (s = 1):
    # -log(x)**2/(2*s**2) - log(s*x*sqrt(2*pi)).
    # _lazywhere skips evaluating log(0) at x == 0 and returns -inf there.
    return _lazywhere(x != 0, (x, s),
                      lambda x, s: -np.log(x)**2 / (2*s**2) - np.log(s*x*np.sqrt(2*np.pi)),
                      -np.inf)
class lognorm_gen(rv_continuous):
    """A lognormal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `lognorm` is::
        lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
    for ``x > 0``, ``s > 0``.
    `lognorm` takes ``s`` as a shape parameter.
    %(after_notes)s
    A common parametrization for a lognormal random variable ``Y`` is in
    terms of the mean, ``mu``, and standard deviation, ``sigma``, of the
    unique normally distributed random variable ``X`` such that exp(X) = Y.
    This parametrization corresponds to setting ``s = sigma`` and ``scale =
    exp(mu)``.
    %(example)s
    """
    _support_mask = rv_continuous._open_support_mask
    def _rvs(self, s):
        # If Z ~ N(0, 1), exp(s*Z) is lognormal with shape s.
        return np.exp(s * self._random_state.standard_normal(self._size))
    def _pdf(self, x, s):
        # Evaluate via the log-pdf for numerical stability.
        return np.exp(self._logpdf(x, s))
    def _logpdf(self, x, s):
        return _lognorm_logpdf(x, s)
    def _cdf(self, x, s):
        # cdf(x) = Phi(log(x)/s), with Phi the standard normal cdf.
        return _norm_cdf(np.log(x) / s)
    def _logcdf(self, x, s):
        return _norm_logcdf(np.log(x) / s)
    def _ppf(self, q, s):
        # Invert the cdf: exp(s * Phi^{-1}(q)).
        return np.exp(s * _norm_ppf(q))
    def _sf(self, x, s):
        return _norm_sf(np.log(x) / s)
    def _logsf(self, x, s):
        return _norm_logsf(np.log(x) / s)
    def _stats(self, s):
        # Closed-form moments expressed through p = exp(s**2).
        p = np.exp(s*s)
        mu = np.sqrt(p)
        mu2 = p*(p-1)
        g1 = np.sqrt((p-1))*(2+p)
        g2 = np.polyval([1, 2, 3, 0, -6.0], p)
        return mu, mu2, g1, g2
    def _entropy(self, s):
        return 0.5 * (1 + np.log(2*np.pi) + 2 * np.log(s))
lognorm = lognorm_gen(a=0.0, name='lognorm')
class gilbrat_gen(rv_continuous):
    """A Gilbrat continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `gilbrat` is::
        gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
    `gilbrat` is a special case of `lognorm` with ``s = 1``.
    %(after_notes)s
    %(example)s
    """
    # NOTE(review): the historical spelling "gilbrat" (vs. Gibrat) is part of
    # the public API and must not be changed.
    _support_mask = rv_continuous._open_support_mask
    def _rvs(self):
        # exp of a standard normal sample is Gilbrat-distributed.
        return np.exp(self._random_state.standard_normal(self._size))
    def _pdf(self, x):
        # Evaluate via the log-pdf for numerical stability.
        return np.exp(self._logpdf(x))
    def _logpdf(self, x):
        # Reuse the lognorm log-pdf with shape s = 1.
        return _lognorm_logpdf(x, 1.0)
    def _cdf(self, x):
        return _norm_cdf(np.log(x))
    def _ppf(self, q):
        return np.exp(_norm_ppf(q))
    def _stats(self):
        # Same closed forms as lognorm._stats evaluated at p = exp(1).
        p = np.e
        mu = np.sqrt(p)
        mu2 = p * (p - 1)
        g1 = np.sqrt((p - 1)) * (2 + p)
        g2 = np.polyval([1, 2, 3, 0, -6.0], p)
        return mu, mu2, g1, g2
    def _entropy(self):
        return 0.5 * np.log(2 * np.pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
class maxwell_gen(rv_continuous):
    """A Maxwell continuous random variable.
    %(before_notes)s
    Notes
    -----
    A special case of a `chi` distribution,  with ``df = 3``, ``loc = 0.0``,
    and given ``scale = a``, where ``a`` is the parameter used in the
    Mathworld description [1]_.
    The probability density function for `maxwell` is::
        maxwell.pdf(x) = sqrt(2/pi)x**2 * exp(-x**2/2)
    for ``x > 0``.
    %(after_notes)s
    References
    ----------
    .. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
    %(example)s
    """
    def _rvs(self):
        # Sample as chi with 3 degrees of freedom.
        return chi.rvs(3.0, size=self._size, random_state=self._random_state)
    def _pdf(self, x):
        return np.sqrt(2.0/np.pi)*x*x*np.exp(-x*x/2.0)
    def _cdf(self, x):
        # Regularized incomplete gamma with a = 3/2 (chi-squared with 3 df).
        return sc.gammainc(1.5, x*x/2.0)
    def _ppf(self, q):
        return np.sqrt(2*sc.gammaincinv(1.5, q))
    def _stats(self):
        # Closed-form mean/variance/skew/kurtosis; val = 3*pi - 8 appears in
        # the normalizing denominators.
        val = 3*np.pi-8
        return (2*np.sqrt(2.0/np.pi),
                3-8/np.pi,
                np.sqrt(2)*(32-10*np.pi)/val**1.5,
                (-12*np.pi*np.pi + 160*np.pi - 384) / val**2.0)
    def _entropy(self):
        # _EULER is the Euler-Mascheroni constant.
        return _EULER + 0.5*np.log(2*np.pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell')
class mielke_gen(rv_continuous):
    """A Mielke's Beta-Kappa continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `mielke` is::
        mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s)
    for ``x > 0``.
    `mielke` takes ``k`` and ``s`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, k, s):
        # The *1.0 factors force float division for integer shapes.
        numer = k*x**(k-1.0)
        denom = (1.0+x**s)**(1.0+k*1.0/s)
        return numer / denom
    def _cdf(self, x, k, s):
        return x**k / (1.0+x**s)**(k*1.0/s)
    def _ppf(self, q, k, s):
        # Invert the cdf: with t = q**(s/k), x = (t/(1-t))**(1/s).
        t = q**(s*1.0/k)
        return (t/(1.0-t))**(1.0/s)
mielke = mielke_gen(a=0.0, name='mielke')
class kappa4_gen(rv_continuous):
    """Kappa 4 parameter distribution.
    %(before_notes)s
    Notes
    -----
    The probability density function for kappa4 is::
        kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
                              (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1)
    if ``h`` and ``k`` are not equal to 0.
    If ``h`` or ``k`` are zero then the pdf can be simplified:
    h = 0 and k != 0::
        kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
                              exp(-(1.0 - k*x)**(1.0/k))
    h != 0 and k = 0::
        kappa4.pdf(x, h, k) = exp(-x)*(1.0 - h*exp(-x))**(1.0/h - 1.0)
    h = 0 and k = 0::
        kappa4.pdf(x, h, k) = exp(-x)*exp(-exp(-x))
    kappa4 takes ``h`` and ``k`` as shape parameters.
    The kappa4 distribution returns other distributions when certain
    ``h`` and ``k`` values are used.
    +------+-------------+----------------+------------------+
    | h    | k=0.0       | k=1.0          | -inf<=k<=inf     |
    +======+=============+================+==================+
    | -1.0 | Logistic    |                | Generalized      |
    |      |             |                | Logistic(1)      |
    |      |             |                |                  |
    |      | logistic(x) |                |                  |
    +------+-------------+----------------+------------------+
    | 0.0  | Gumbel      | Reverse        | Generalized      |
    |      |             | Exponential(2) | Extreme Value    |
    |      |             |                |                  |
    |      | gumbel_r(x) |                | genextreme(x, k) |
    +------+-------------+----------------+------------------+
    | 1.0  | Exponential | Uniform        | Generalized      |
    |      |             |                | Pareto           |
    |      |             |                |                  |
    |      | expon(x)    | uniform(x)     | genpareto(x, -k) |
    +------+-------------+----------------+------------------+
    (1) There are at least five generalized logistic distributions.
        Four are described here:
        https://en.wikipedia.org/wiki/Generalized_logistic_distribution
        The "fifth" one is the one kappa4 should match which currently
        isn't implemented in scipy:
        https://en.wikipedia.org/wiki/Talk:Generalized_logistic_distribution
        http://www.mathwave.com/help/easyfit/html/analyses/distributions/gen_logistic.html
    (2) This distribution is currently not in scipy.
    References
    ----------
    J.C. Finney, "Optimization of a Skewed Logistic Distribution With Respect
    to the Kolmogorov-Smirnov Test", A Dissertation Submitted to the Graduate
    Faculty of the Louisiana State University and Agricultural and Mechanical
    College, (August, 2004),
    http://etd.lsu.edu/docs/available/etd-05182004-144851/unrestricted/Finney_dis.pdf
    J.R.M. Hosking, "The four-parameter kappa distribution". IBM J. Res.
    Develop. 38 (3), 25 1-258 (1994).
    B. Kumphon, A. Kaew-Man, P. Seenoi, "A Rainfall Distribution for the Lampao
    Site in the Chi River Basin, Thailand", Journal of Water Resource and
    Protection, vol. 4, 866-869, (2012).
    http://file.scirp.org/pdf/JWARP20121000009_14676002.pdf
    C. Winchester, "On Estimation of the Four-Parameter Kappa Distribution", A
    Thesis Submitted to Dalhousie University, Halifax, Nova Scotia, (March
    2000).
    http://www.nlc-bnc.ca/obj/s4/f2/dsk2/ftp01/MQ57336.pdf
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, h, k):
        # The support endpoints depend on the (h, k) regime, so this method
        # also sets self.a (lower) and self.b (upper) via _lazyselect over
        # the six sign combinations below.
        condlist = [np.logical_and(h > 0, k > 0),
                    np.logical_and(h > 0, k == 0),
                    np.logical_and(h > 0, k < 0),
                    np.logical_and(h <= 0, k > 0),
                    np.logical_and(h <= 0, k == 0),
                    np.logical_and(h <= 0, k < 0)]
        def f0(h, k):
            # Finite lower endpoint for h > 0, k != 0.
            return (1.0 - h**(-k))/k
        def f1(h, k):
            return np.log(h)
        def f3(h, k):
            # Unbounded below.
            a = np.empty(np.shape(h))
            a[:] = -np.inf
            return a
        def f5(h, k):
            return 1.0/k
        self.a = _lazyselect(condlist,
                             [f0, f1, f0, f3, f3, f5],
                             [h, k],
                             default=np.nan)
        def f0(h, k):
            # Finite upper endpoint 1/k for k > 0.
            return 1.0/k
        def f1(h, k):
            # Unbounded above.
            a = np.empty(np.shape(h))
            a[:] = np.inf
            return a
        self.b = _lazyselect(condlist,
                             [f0, f1, f1, f0, f1, f1],
                             [h, k],
                             default=np.nan)
        # h == h is True for every non-NaN h: all real (h, k) are accepted;
        # only NaN shape values are rejected.
        return h == h
    def _pdf(self, x, h, k):
        # Evaluate via the log-pdf for numerical stability.
        return np.exp(self._logpdf(x, h, k))
    def _logpdf(self, x, h, k):
        # Four regimes, matching the pdf forms in the class docstring.
        condlist = [np.logical_and(h != 0, k != 0),
                    np.logical_and(h == 0, k != 0),
                    np.logical_and(h != 0, k == 0),
                    np.logical_and(h == 0, k == 0)]
        def f0(x, h, k):
            '''pdf = (1.0 - k*x)**(1.0/k - 1.0)*(
                      1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1.0)
               logpdf = ...
            '''
            return (sc.xlog1py(1.0/k - 1.0, -k*x) +
                    sc.xlog1py(1.0/h - 1.0, -h*(1.0 - k*x)**(1.0/k)))
        def f1(x, h, k):
            '''pdf = (1.0 - k*x)**(1.0/k - 1.0)*np.exp(-(
                      1.0 - k*x)**(1.0/k))
               logpdf = ...
            '''
            return sc.xlog1py(1.0/k - 1.0, -k*x) - (1.0 - k*x)**(1.0/k)
        def f2(x, h, k):
            '''pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0)
               logpdf = ...
            '''
            return -x + sc.xlog1py(1.0/h - 1.0, -h*np.exp(-x))
        def f3(x, h, k):
            '''pdf = np.exp(-x-np.exp(-x))
               logpdf = ...
            '''
            return -x - np.exp(-x)
        return _lazyselect(condlist,
                           [f0, f1, f2, f3],
                           [x, h, k],
                           default=np.nan)
    def _cdf(self, x, h, k):
        # Evaluate via the log-cdf for numerical stability.
        return np.exp(self._logcdf(x, h, k))
    def _logcdf(self, x, h, k):
        # Same four-regime split as _logpdf.
        condlist = [np.logical_and(h != 0, k != 0),
                    np.logical_and(h == 0, k != 0),
                    np.logical_and(h != 0, k == 0),
                    np.logical_and(h == 0, k == 0)]
        def f0(x, h, k):
            '''cdf = (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h)
               logcdf = ...
            '''
            return (1.0/h)*sc.log1p(-h*(1.0 - k*x)**(1.0/k))
        def f1(x, h, k):
            '''cdf = np.exp(-(1.0 - k*x)**(1.0/k))
               logcdf = ...
            '''
            return -(1.0 - k*x)**(1.0/k)
        def f2(x, h, k):
            '''cdf = (1.0 - h*np.exp(-x))**(1.0/h)
               logcdf = ...
            '''
            return (1.0/h)*sc.log1p(-h*np.exp(-x))
        def f3(x, h, k):
            '''cdf = np.exp(-np.exp(-x))
               logcdf = ...
            '''
            return -np.exp(-x)
        return _lazyselect(condlist,
                           [f0, f1, f2, f3],
                           [x, h, k],
                           default=np.nan)
    def _ppf(self, q, h, k):
        # Closed-form quantile function per regime (inverse of each cdf).
        condlist = [np.logical_and(h != 0, k != 0),
                    np.logical_and(h == 0, k != 0),
                    np.logical_and(h != 0, k == 0),
                    np.logical_and(h == 0, k == 0)]
        def f0(q, h, k):
            return 1.0/k*(1.0 - ((1.0 - (q**h))/h)**k)
        def f1(q, h, k):
            return 1.0/k*(1.0 - (-np.log(q))**k)
        def f2(q, h, k):
            '''ppf = -np.log((1.0 - (q**h))/h)
            '''
            return -sc.log1p(-(q**h)) + np.log(h)
        def f3(q, h, k):
            return -np.log(-np.log(q))
        return _lazyselect(condlist,
                           [f0, f1, f2, f3],
                           [q, h, k],
                           default=np.nan)
    def _stats(self, h, k):
        # NOTE(review): scalar truth tests on h and k -- this assumes scalar
        # shape parameters; array-valued h/k would raise here.  Confirm that
        # the framework only calls _stats with scalars.
        if h >= 0 and k >= 0:
            maxr = 5
        elif h < 0 and k >= 0:
            maxr = int(-1.0/h*k)
        elif k < 0:
            maxr = int(-1.0/k)
        else:
            maxr = 5
        # None = let the generic machinery compute the moment; NaN = moment
        # does not exist for this (h, k).
        outputs = [None if r < maxr else np.nan for r in range(1, 5)]
        return outputs[:]
kappa4 = kappa4_gen(name='kappa4')
class kappa3_gen(rv_continuous):
    """Kappa 3 parameter distribution.
    %(before_notes)s
    Notes
    -----
    The probability density function for `kappa3` is::
        kappa3.pdf(x, a) =
            a*[a + x**a]**(-(a + 1)/a),     for ``x > 0``
            0.0,                            for ``x <= 0``
    `kappa3` takes ``a`` as a shape parameter and ``a > 0``.
    References
    ----------
    P.W. Mielke and E.S. Johnson, "Three-Parameter Kappa Distribution Maximum
    Likelihood and Likelihood Ratio Tests", Methods in Weather Research,
    701-707, (September, 1973),
    http://docs.lib.noaa.gov/rescue/mwr/101/mwr-101-09-0701.pdf
    B. Kumphon, "Maximum Entropy and Maximum Likelihood Estimation for the
    Three-Parameter Kappa Distribution", Open Journal of Statistics, vol 2,
    415-419 (2012)
    http://file.scirp.org/pdf/OJS20120400011_95789012.pdf
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, a):
        # Single strictly positive shape parameter.
        return a > 0
    def _pdf(self, x, a):
        # a * (a + x**a)**(-(a+1)/a), exponent written as -1/a - 1.
        base = a + x**a
        return a*base**(-1.0/a-1)
    def _cdf(self, x, a):
        base = a + x**a
        return x*base**(-1.0/a)
    def _ppf(self, q, a):
        # Invert the cdf: x = (a / (q**-a - 1))**(1/a).
        return (a/(q**-a - 1.0))**(1.0/a)
    def _stats(self, a):
        # The n-th moment exists only for n < a; NaN marks the rest and
        # None defers to the generic machinery.
        outputs = [None if i < a else np.nan for i in range(1, 5)]
        return outputs[:]
kappa3 = kappa3_gen(a=0.0, name='kappa3')
class nakagami_gen(rv_continuous):
    """A Nakagami continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `nakagami` is::
        nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
                              x**(2*nu-1) * exp(-nu*x**2)
    for ``x > 0``, ``nu > 0``.
    `nakagami` takes ``nu`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, nu):
        return 2*nu**nu/sc.gamma(nu)*(x**(2*nu-1.0))*np.exp(-nu*x*x)
    def _cdf(self, x, nu):
        # cdf via the regularized lower incomplete gamma at nu*x**2.
        return sc.gammainc(nu, nu*x*x)
    def _ppf(self, q, nu):
        # Invert the cdf through the incomplete-gamma inverse.
        return np.sqrt(1.0/nu*sc.gammaincinv(nu, q))
    def _stats(self, nu):
        # Closed-form mean/variance/skew/kurtosis in terms of gamma ratios.
        mu = sc.gamma(nu+0.5)/sc.gamma(nu)/np.sqrt(nu)
        mu2 = 1.0-mu*mu
        g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
        g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
        g2 /= nu*mu2**2.0
        return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami")
class ncx2_gen(rv_continuous):
    """A non-central chi-squared continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `ncx2` is::
        ncx2.pdf(x, df, nc) = exp(-(nc+x)/2) * 1/2 * (x/nc)**((df-2)/4)
                              * I[(df-2)/2](sqrt(nc*x))
    for ``x > 0``.
    `ncx2` takes ``df`` and ``nc`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, df, nc):
        # Delegate sampling to numpy's noncentral chi-square generator.
        return self._random_state.noncentral_chisquare(df, nc, self._size)
    def _logpdf(self, x, df, nc):
        # Module-level helper implements the log-density.
        return _ncx2_log_pdf(x, df, nc)
    def _pdf(self, x, df, nc):
        return _ncx2_pdf(x, df, nc)
    def _cdf(self, x, df, nc):
        return _ncx2_cdf(x, df, nc)
    def _ppf(self, q, df, nc):
        # chndtrix inverts the noncentral chi-square cdf in x.
        return sc.chndtrix(q, df, nc)
    def _stats(self, df, nc):
        # Closed-form moments; val = df + 2*nc recurs in the denominators.
        val = df + 2.0*nc
        return (df + nc,
                2*val,
                np.sqrt(8)*(val+nc)/val**1.5,
                12.0*(val+2*nc)/val**2.0)
ncx2 = ncx2_gen(a=0.0, name='ncx2')
class ncf_gen(rv_continuous):
    """A non-central F distribution continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `ncf` is::
        ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2))) *
                    df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) *
                    (df2+df1*x)**(-(df1+df2)/2) *
                    gamma(df1/2)*gamma(1+df2/2) *
                    L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2))) /
                    (B(v1/2, v2/2) * gamma((v1+v2)/2))
    for ``df1, df2, nc > 0``.
    `ncf` takes ``df1``, ``df2`` and ``nc`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, dfn, dfd, nc):
        # Delegate sampling to numpy's noncentral F generator.
        return self._random_state.noncentral_f(dfn, dfd, nc, self._size)
    def _pdf_skip(self, x, dfn, dfd, nc):
        # Explicit pdf via the associated Laguerre polynomial expansion,
        # computed in log space where possible.  The name _pdf_skip keeps
        # the framework using the generic numerical _pdf instead; rename to
        # _pdf to re-enable this implementation.
        n1, n2 = dfn, dfd
        term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + sc.gammaln(n1/2.)+sc.gammaln(1+n2/2.)
        term -= sc.gammaln((n1+n2)/2.0)
        Px = np.exp(term)
        Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
        Px *= (n2+n1*x)**(-(n1+n2)/2)
        Px *= sc.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)), n2/2, n1/2-1)
        Px /= sc.beta(n1/2, n2/2)
        # Fix: the computed density was previously discarded (the method had
        # no return statement).  Returning Px makes the method usable; the
        # distribution's behavior is unchanged because the framework never
        # calls _pdf_skip.
        return Px
    def _cdf(self, x, dfn, dfd, nc):
        # cdf via scipy.special's noncentral-F distribution function.
        return sc.ncfdtr(dfn, dfd, nc, x)
    def _ppf(self, q, dfn, dfd, nc):
        # ncfdtri inverts ncfdtr in x.
        return sc.ncfdtri(dfn, dfd, nc, q)
    def _munp(self, n, dfn, dfd, nc):
        # n-th raw moment via the confluent hypergeometric function 1F1;
        # exists only for dfd > 2*n (gammaln(0.5*dfd - n) diverges otherwise).
        val = (dfn * 1.0/dfd)**n
        term = sc.gammaln(n+0.5*dfn) + sc.gammaln(0.5*dfd-n) - sc.gammaln(dfd*0.5)
        val *= np.exp(-nc / 2.0+term)
        val *= sc.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
        return val
    def _stats(self, dfn, dfd, nc):
        # Mean exists for dfd > 2, variance for dfd > 4; infinite otherwise.
        # Skew/kurtosis (None) are left to the generic machinery.
        mu = np.where(dfd <= 2, np.inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
        mu2 = np.where(dfd <= 4, np.inf, 2*(dfd*1.0/dfn)**2.0 *
                       ((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) /
                       ((dfd-2.0)**2.0 * (dfd-4.0)))
        return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf')
class t_gen(rv_continuous):
    """A Student's T continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `t` is::
                                       gamma((df+1)/2)
        t.pdf(x, df) = ---------------------------------------------------
                       sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
    for ``df > 0``.
    `t` takes ``df`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, df):
        # Delegate sampling to numpy's standard_t generator.
        return self._random_state.standard_t(df, size=self._size)
    def _pdf(self, x, df):
        # Gamma ratio computed in log space (gammaln) to avoid overflow.
        r = np.asarray(df*1.0)
        Px = np.exp(sc.gammaln((r+1)/2)-sc.gammaln(r/2))
        Px /= np.sqrt(r*np.pi)*(1+(x**2)/r)**((r+1)/2)
        return Px
    def _logpdf(self, x, df):
        r = df*1.0
        lPx = sc.gammaln((r+1)/2)-sc.gammaln(r/2)
        lPx -= 0.5*np.log(r*np.pi) + (r+1)/2*np.log(1+(x**2)/r)
        return lPx
    def _cdf(self, x, df):
        # Student's t cdf from scipy.special.
        return sc.stdtr(df, x)
    def _sf(self, x, df):
        # Symmetry about 0: sf(x) = cdf(-x).
        return sc.stdtr(df, -x)
    def _ppf(self, q, df):
        return sc.stdtrit(df, q)
    def _isf(self, q, df):
        return -sc.stdtrit(df, q)
    def _stats(self, df):
        # Variance df/(df-2) exists for df > 2 (inf otherwise); skewness 0
        # for df > 3; excess kurtosis 6/(df-4) for df > 4; NaN where the
        # moment does not exist.  _lazywhere avoids evaluating the invalid
        # branch.
        mu2 = _lazywhere(df > 2, (df,),
                         lambda df: df / (df-2.0),
                         np.inf)
        g1 = np.where(df > 3, 0.0, np.nan)
        g2 = _lazywhere(df > 4, (df,),
                        lambda df: 6.0 / (df-4.0),
                        np.nan)
        return 0, mu2, g1, g2
t = t_gen(name='t')
class nct_gen(rv_continuous):
    """A non-central Student's T continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `nct` is::
                                            df**(df/2) * gamma(df+1)
        nct.pdf(x, df, nc) = ----------------------------------------------------
                             2**df*exp(nc**2/2) * (df+x**2)**(df/2) * gamma(df/2)
    for ``df > 0``.
    `nct` takes ``df`` and ``nc`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, df, nc):
        # df must be positive; nc == nc rejects only NaN noncentrality.
        return (df > 0) & (nc == nc)
    def _rvs(self, df, nc):
        # Definition of nct: (Z + nc) * sqrt(df) / sqrt(chi2(df)).
        sz, rndm = self._size, self._random_state
        n = norm.rvs(loc=nc, size=sz, random_state=rndm)
        c2 = chi2.rvs(df, size=sz, random_state=rndm)
        return n * np.sqrt(df) / np.sqrt(c2)
    def _pdf(self, x, df, nc):
        # Density as a combination of two confluent hypergeometric (1F1)
        # terms; the common prefactor is assembled in log space (trm1).
        n = df*1.0
        nc = nc*1.0
        x2 = x*x
        ncx2 = nc*nc*x2
        fac1 = n + x2
        trm1 = n/2.*np.log(n) + sc.gammaln(n+1)
        trm1 -= n*np.log(2)+nc*nc/2.+(n/2.)*np.log(fac1)+sc.gammaln(n/2.)
        Px = np.exp(trm1)
        valF = ncx2 / (2*fac1)
        trm1 = np.sqrt(2)*nc*x*sc.hyp1f1(n/2+1, 1.5, valF)
        trm1 /= np.asarray(fac1*sc.gamma((n+1)/2))
        trm2 = sc.hyp1f1((n+1)/2, 0.5, valF)
        trm2 /= np.asarray(np.sqrt(fac1)*sc.gamma(n/2+1))
        Px *= trm1+trm2
        return Px
    def _cdf(self, x, df, nc):
        # Noncentral t cdf from scipy.special.
        return sc.nctdtr(df, nc, x)
    def _ppf(self, q, df, nc):
        return sc.nctdtrit(df, nc, q)
    def _stats(self, df, nc, moments='mv'):
        #
        # See D. Hogben, R.S. Pinkham, and M.B. Wilk,
        # 'The moments of the non-central t-distribution'
        # Biometrika 48, p. 465 (1961).
        # e.g. http://www.jstor.org/stable/2332772 (gated)
        #
        mu, mu2, g1, g2 = None, None, None, None
        # c11/c20/c22 are the moment coefficients from the reference above;
        # each moment exists only above a df threshold (inf below it).
        gfac = sc.gamma(df/2.-0.5) / sc.gamma(df/2.)
        c11 = np.sqrt(df/2.) * gfac
        c20 = df / (df-2.)
        c22 = c20 - c11*c11
        mu = np.where(df > 1, nc*c11, np.inf)
        mu2 = np.where(df > 2, c22*nc*nc + c20, np.inf)
        if 's' in moments:
            c33t = df * (7.-2.*df) / (df-2.) / (df-3.) + 2.*c11*c11
            c31t = 3.*df / (df-2.) / (df-3.)
            mu3 = (c33t*nc*nc + c31t) * c11*nc
            g1 = np.where(df > 3, mu3 / np.power(mu2, 1.5), np.nan)
        #kurtosis
        if 'k' in moments:
            c44 = df*df / (df-2.) / (df-4.)
            c44 -= c11*c11 * 2.*df*(5.-df) / (df-2.) / (df-3.)
            c44 -= 3.*c11**4
            c42 = df / (df-4.) - c11*c11 * (df-1.) / (df-3.)
            c42 *= 6.*df / (df-2.)
            c40 = 3.*df*df / (df-2.) / (df-4.)
            mu4 = c44 * nc**4 + c42*nc**2 + c40
            g2 = np.where(df > 4, mu4/mu2**2 - 3., np.nan)
        return mu, mu2, g1, g2
nct = nct_gen(name="nct")
class pareto_gen(rv_continuous):
    """A Pareto continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `pareto` is::
        pareto.pdf(x, b) = b / x**(b+1)
    for ``x >= 1``, ``b > 0``.
    `pareto` takes ``b`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, b):
        return b * x**(-b-1)
    def _cdf(self, x, b):
        return 1 - x**(-b)
    def _ppf(self, q, b):
        # Invert the cdf: x = (1-q)**(-1/b).
        return pow(1-q, -1.0/b)
    def _stats(self, b, moments='mv'):
        # Each moment exists only above a threshold on b; extract/place
        # computes the finite values where the mask holds and leaves
        # inf (mean, variance) or NaN (skew, kurtosis) elsewhere.
        mu, mu2, g1, g2 = None, None, None, None
        if 'm' in moments:
            mask = b > 1
            bt = np.extract(mask, b)
            mu = valarray(np.shape(b), value=np.inf)
            np.place(mu, mask, bt / (bt-1.0))
        if 'v' in moments:
            mask = b > 2
            bt = np.extract(mask, b)
            mu2 = valarray(np.shape(b), value=np.inf)
            np.place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
        if 's' in moments:
            mask = b > 3
            bt = np.extract(mask, b)
            g1 = valarray(np.shape(b), value=np.nan)
            vals = 2 * (bt + 1.0) * np.sqrt(bt - 2.0) / ((bt - 3.0) * np.sqrt(bt))
            np.place(g1, mask, vals)
        if 'k' in moments:
            mask = b > 4
            bt = np.extract(mask, b)
            g2 = valarray(np.shape(b), value=np.nan)
            vals = (6.0*np.polyval([1.0, 1.0, -6, -2], bt) /
                    np.polyval([1.0, -7.0, 12.0, 0.0], bt))
            np.place(g2, mask, vals)
        return mu, mu2, g1, g2
    def _entropy(self, c):
        # NOTE(review): the parameter is named `c` here although the shape
        # is called `b` elsewhere in this class; it is the same quantity.
        return 1 + 1.0/c - np.log(c)
pareto = pareto_gen(a=1.0, name="pareto")
class lomax_gen(rv_continuous):
    """A Lomax (Pareto of the second kind) continuous random variable.
    %(before_notes)s
    Notes
    -----
    The Lomax distribution is a special case of the Pareto distribution, with
    (loc=-1.0).
    The probability density function for `lomax` is::
        lomax.pdf(x, c) = c / (1+x)**(c+1)
    for ``x >= 0``, ``c > 0``.
    `lomax` takes ``c`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, c):
        return c*1.0/(1.0+x)**(c+1.0)
    def _logpdf(self, x, c):
        # log1p keeps accuracy for small x.
        return np.log(c) - (c+1)*sc.log1p(x)
    def _cdf(self, x, c):
        # cdf = 1 - (1+x)**(-c), via expm1/log1p for stability.
        return -sc.expm1(-c*sc.log1p(x))
    def _sf(self, x, c):
        return np.exp(-c*sc.log1p(x))
    def _logsf(self, x, c):
        return -c*sc.log1p(x)
    def _ppf(self, q, c):
        return sc.expm1(-sc.log1p(-q)/c)
    def _stats(self, c):
        # Reuse pareto's moments with the loc=-1 shift.
        mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
        return mu, mu2, g1, g2
    def _entropy(self, c):
        return 1+1.0/c-np.log(c)
lomax = lomax_gen(a=0.0, name="lomax")
class pearson3_gen(rv_continuous):
    """A pearson type III continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `pearson3` is::
        pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
            (beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
    where::
            beta = 2 / (skew * stddev)
            alpha = (stddev * beta)**2
            zeta = loc - alpha / beta
    `pearson3` takes ``skew`` as a shape parameter.
    %(after_notes)s
    %(example)s
    References
    ----------
    R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
    Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
    Resources Research, Vol.27, 3149-3158 (1991).
    L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
    Vol.1, 191-198 (1930).
    "Using Modern Computing Tools to Fit the Pearson Type III Distribution to
    Aviation Loads Data", Office of Aviation Research (2003).
    """
    def _preprocess(self, x, skew):
        # Shared setup for pdf/cdf/ppf/rvs: broadcast x and skew, split into
        # the near-zero-skew (normal approximation) and general (gamma)
        # regimes, and compute the gamma transform parameters.
        # The real 'loc' and 'scale' are handled in the calling pdf(...). The
        # local variables 'loc' and 'scale' within pearson3._pdf are set to
        # the defaults just to keep them as part of the equations for
        # documentation.
        loc = 0.0
        scale = 1.0
        # If skew is small, return _norm_pdf. The divide between pearson3
        # and norm was found by brute force and is approximately a skew of
        # 0.000016. No one, I hope, would actually use a skew value even
        # close to this small.
        norm2pearson_transition = 0.000016
        ans, x, skew = np.broadcast_arrays([1.0], x, skew)
        ans = ans.copy()
        # mask is True where skew is small enough to use the normal approx.
        mask = np.absolute(skew) < norm2pearson_transition
        invmask = ~mask
        # beta/alpha/zeta per the formulas in the class docstring, computed
        # only where the gamma branch applies.
        beta = 2.0 / (skew[invmask] * scale)
        alpha = (scale * beta)**2
        zeta = loc - alpha / beta
        transx = beta * (x[invmask] - zeta)
        return ans, x, transx, mask, invmask, beta, alpha, zeta
    def _argcheck(self, skew):
        # The _argcheck function in rv_continuous only allows positive
        # arguments.  The skew argument for pearson3 can be zero (which I want
        # to handle inside pearson3._pdf) or negative.  So just return True
        # for all skew args.
        return np.ones(np.shape(skew), dtype=bool)
    def _stats(self, skew):
        # Moments follow from the gamma parametrization (see docstring).
        _, _, _, _, _, beta, alpha, zeta = (
            self._preprocess([1], skew))
        m = zeta + alpha / beta
        v = alpha / (beta**2)
        s = 2.0 / (alpha**0.5) * np.sign(beta)
        k = 6.0 / alpha
        return m, v, s, k
    def _pdf(self, x, skew):
        # Do the calculation in _logpdf since helps to limit
        # overflow/underflow problems
        ans = np.exp(self._logpdf(x, skew))
        if ans.ndim == 0:
            if np.isnan(ans):
                return 0.0
            return ans
        # NaNs arise outside the gamma branch's support; the density is 0
        # there.
        ans[np.isnan(ans)] = 0.0
        return ans
    def _logpdf(self, x, skew):
        #   PEARSON3 logpdf                           GAMMA logpdf
        #   np.log(abs(beta))
        # + (alpha - 1)*np.log(beta*(x - zeta))     + (a - 1)*np.log(x)
        # - beta*(x - zeta)                         - x
        # - sc.gammaln(alpha)                       - sc.gammaln(a)
        ans, x, transx, mask, invmask, beta, alpha, _ = (
            self._preprocess(x, skew))
        ans[mask] = np.log(_norm_pdf(x[mask]))
        ans[invmask] = np.log(abs(beta)) + gamma._logpdf(transx, alpha)
        return ans
    def _cdf(self, x, skew):
        ans, x, transx, mask, invmask, _, alpha, _ = (
            self._preprocess(x, skew))
        ans[mask] = _norm_cdf(x[mask])
        ans[invmask] = gamma._cdf(transx, alpha)
        return ans
    def _rvs(self, skew):
        # Sample each regime separately: standard normal where skew ~ 0,
        # shifted/scaled standard gamma elsewhere.
        skew = broadcast_to(skew, self._size)
        ans, _, _, mask, invmask, beta, alpha, zeta = (
            self._preprocess([0], skew))
        nsmall = mask.sum()
        nbig = mask.size - nsmall
        ans[mask] = self._random_state.standard_normal(nsmall)
        ans[invmask] = (self._random_state.standard_gamma(alpha, nbig)/beta +
                        zeta)
        if self._size == ():
            ans = ans[0]
        return ans
    def _ppf(self, q, skew):
        ans, q, _, mask, invmask, beta, alpha, zeta = (
            self._preprocess(q, skew))
        ans[mask] = _norm_ppf(q[mask])
        ans[invmask] = sc.gammaincinv(alpha, q[invmask])/beta + zeta
        return ans
pearson3 = pearson3_gen(name="pearson3")
class powerlaw_gen(rv_continuous):
    """A power-function continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `powerlaw` is::
        powerlaw.pdf(x, a) = a * x**(a-1)
    for ``0 <= x <= 1``, ``a > 0``.
    `powerlaw` takes ``a`` as a shape parameter.
    %(after_notes)s
    `powerlaw` is a special case of `beta` with ``b == 1``.
    %(example)s
    """
    def _pdf(self, x, a):
        return a*x**(a-1.0)
    def _logpdf(self, x, a):
        # xlogy handles the x == 0 edge without warnings.
        return np.log(a) + sc.xlogy(a - 1, x)
    def _cdf(self, x, a):
        return x**(a*1.0)
    def _logcdf(self, x, a):
        return a*np.log(x)
    def _ppf(self, q, a):
        # Invert the cdf: x = q**(1/a).
        return pow(q, 1.0/a)
    def _stats(self, a):
        # Closed-form mean, variance, skewness, excess kurtosis.
        return (a / (a + 1.0),
                a / (a + 2.0) / (a + 1.0) ** 2,
                -2.0 * ((a - 1.0) / (a + 3.0)) * np.sqrt((a + 2.0) / a),
                6 * np.polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4)))
    def _entropy(self, a):
        return 1 - 1.0/a - np.log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
class powerlognorm_gen(rv_continuous):
    """A power log-normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `powerlognorm` is::
        powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
                                                (Phi(-log(x)/s))**(c-1),
    where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
    and ``x > 0``, ``s, c > 0``.
    `powerlognorm` takes ``c`` and ``s`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    _support_mask = rv_continuous._open_support_mask
    def _pdf(self, x, c, s):
        # c/(x*s) * phi(log(x)/s) * Phi(-log(x)/s)**(c-1)
        phi_term = _norm_pdf(np.log(x)/s)
        Phi_term = _norm_cdf(-np.log(x)/s)
        return c/(x*s) * phi_term * Phi_term**(c*1.0-1.0)
    def _cdf(self, x, c, s):
        # 1 - Phi(-log(x)/s)**c
        tail = _norm_cdf(-np.log(x)/s)
        return 1.0 - tail**(c*1.0)
    def _ppf(self, q, c, s):
        # Invert the cdf: x = exp(-s * Phi^{-1}((1-q)**(1/c))).
        z = _norm_ppf((1.0 - q)**(1.0 / c))
        return np.exp(-s * z)
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
class powernorm_gen(rv_continuous):
    """A power normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `powernorm` is::
        powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
    where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
    and ``x > 0``, ``c > 0``.
    `powernorm` takes ``c`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, c):
        # c * phi(x) * Phi(-x)**(c-1)
        tail = _norm_cdf(-x)
        return c*_norm_pdf(x) * (tail**(c-1.0))
    def _logpdf(self, x, c):
        # Same expression assembled in log space for stability.
        return np.log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
    def _cdf(self, x, c):
        # 1 - Phi(-x)**c
        return 1.0-_norm_cdf(-x)**(c*1.0)
    def _ppf(self, q, c):
        # Invert the cdf: x = -Phi^{-1}((1-q)**(1/c)).
        return -_norm_ppf((1.0 - q)**(1.0 / c))
powernorm = powernorm_gen(name='powernorm')
class rdist_gen(rv_continuous):
    """An R-distributed continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `rdist` is::
        rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
    for ``-1 <= x <= 1``, ``c > 0``.
    `rdist` takes ``c`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, c):
        return np.power((1.0 - x**2), c / 2.0 - 1) / sc.beta(0.5, c / 2.0)
    def _cdf(self, x, c):
        # Closed form in terms of the Gauss hypergeometric function 2F1.
        term1 = x / sc.beta(0.5, c / 2.0)
        res = 0.5 + term1 * sc.hyp2f1(0.5, 1 - c / 2.0, 1.5, x**2)
        # There's an issue with hyp2f1, it returns nans near x = +-1, c > 100.
        # Use the generic implementation in that case.  See gh-1285 for
        # background.
        if np.any(np.isnan(res)):
            return rv_continuous._cdf(self, x, c)
        return res
    def _munp(self, n, c):
        # Odd moments vanish by symmetry (the (1 - n % 2) factor).
        numerator = (1 - (n % 2)) * sc.beta((n + 1.0) / 2, c / 2.0)
        return numerator / sc.beta(1. / 2, c / 2.)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
class rayleigh_gen(rv_continuous):
    """A Rayleigh continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `rayleigh` is::
        rayleigh.pdf(r) = r * exp(-r**2/2)
    for ``x >= 0``.
    `rayleigh` is a special case of `chi` with ``df == 2``.
    %(after_notes)s
    %(example)s
    """
    _support_mask = rv_continuous._open_support_mask
    def _rvs(self):
        # Sample as chi with 2 degrees of freedom.
        return chi.rvs(2, size=self._size, random_state=self._random_state)
    def _pdf(self, r):
        # Evaluate via the log-pdf for numerical stability.
        return np.exp(self._logpdf(r))
    def _logpdf(self, r):
        return np.log(r) - 0.5 * r * r
    def _cdf(self, r):
        # cdf = 1 - exp(-r**2/2), via expm1 for small r.
        return -sc.expm1(-0.5 * r**2)
    def _ppf(self, q):
        return np.sqrt(-2 * sc.log1p(-q))
    def _sf(self, r):
        return np.exp(self._logsf(r))
    def _logsf(self, r):
        return -0.5 * r * r
    def _isf(self, q):
        return np.sqrt(-2 * np.log(q))
    def _stats(self):
        # Closed forms; val = 4 - pi recurs in the denominators.
        val = 4 - np.pi
        return (np.sqrt(np.pi/2),
                val/2,
                2*(np.pi-3)*np.sqrt(np.pi)/val**1.5,
                6*np.pi/val-16/val**2)
    def _entropy(self):
        # _EULER is the Euler-Mascheroni constant.
        return _EULER/2.0 + 1 - 0.5*np.log(2)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
class reciprocal_gen(rv_continuous):
    """A reciprocal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `reciprocal` is::
        reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
    for ``a <= x <= b``, ``a, b > 0``.
    `reciprocal` takes ``a`` and ``b`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, a, b):
        # The shape parameters double as the support endpoints, so this
        # method also stores them (and the normalizer d = log(b/a)) on self.
        # NOTE(review): mutating self here makes a shared instance stateful
        # across calls with different shapes -- inherited framework design.
        self.a = a
        self.b = b
        self.d = np.log(b*1.0 / a)
        return (a > 0) & (b > 0) & (b > a)
    def _pdf(self, x, a, b):
        # Uses self.d computed in _argcheck.
        return 1.0 / (x * self.d)
    def _logpdf(self, x, a, b):
        return -np.log(x) - np.log(self.d)
    def _cdf(self, x, a, b):
        return (np.log(x)-np.log(a)) / self.d
    def _ppf(self, q, a, b):
        # Invert the cdf: x = a * (b/a)**q.
        return a*pow(b*1.0/a, q)
    def _munp(self, n, a, b):
        # n-th raw moment: (b**n - a**n) / (n * log(b/a)).
        return 1.0/self.d / n * (pow(b*1.0, n) - pow(a*1.0, n))
    def _entropy(self, a, b):
        return 0.5*np.log(a*b)+np.log(np.log(b/a))
reciprocal = reciprocal_gen(name="reciprocal")
class rice_gen(rv_continuous):
    """A Rice continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `rice` is::
        rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
    for ``x > 0``, ``b > 0``.
    `rice` takes ``b`` as a shape parameter.
    %(after_notes)s
    The Rice distribution describes the length, ``r``, of a 2-D vector
    with components ``(U+u, V+v)``, where ``U, V`` are constant, ``u, v``
    are independent Gaussian random variables with standard deviation
    ``s``.  Let ``R = (U**2 + V**2)**0.5``. Then the pdf of ``r`` is
    ``rice.pdf(x, R/s, scale=s)``.
    %(example)s
    """
    def _argcheck(self, b):
        # b == 0 is allowed; the density then reduces to a Rayleigh.
        return b >= 0
    def _rvs(self, b):
        # http://en.wikipedia.org/wiki/Rice_distribution
        # Length of a 2-D Gaussian vector with mean (b/sqrt(2), b/sqrt(2)).
        t = b/np.sqrt(2) + self._random_state.standard_normal(size=(2,) +
                                                              self._size)
        return np.sqrt((t*t).sum(axis=0))
    def _cdf(self, x, b):
        # CDF via the noncentral chi-square distribution with 2 degrees
        # of freedom and noncentrality b**2.
        return sc.chndtr(np.square(x), 2, np.square(b))
    def _ppf(self, q, b):
        return np.sqrt(sc.chndtrix(q, 2, np.square(b)))
    def _pdf(self, x, b):
        # We use (x**2 + b**2)/2 = ((x-b)**2)/2 + xb.
        # The factor of np.exp(-xb) is then included in the i0e function
        # in place of the modified Bessel function, i0, improving
        # numerical stability for large values of xb.
        return x * np.exp(-(x-b)*(x-b)/2.0) * sc.i0e(x*b)
    def _munp(self, n, b):
        # Closed-form raw moments via the confluent hypergeometric 1F1.
        nd2 = n/2.0
        n1 = 1 + nd2
        b2 = b*b/2.0
        return (2.0**(nd2) * np.exp(-b2) * sc.gamma(n1) *
                sc.hyp1f1(n1, 1, b2))
rice = rice_gen(a=0.0, name="rice")
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
    """A reciprocal inverse Gaussian continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `recipinvgauss` is::
        recipinvgauss.pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
    for ``x >= 0``.
    `recipinvgauss` takes ``mu`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, mu):
        return 1.0/np.sqrt(2*np.pi*x)*np.exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
    def _logpdf(self, x, mu):
        return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*np.log(2*np.pi*x)
    def _cdf(self, x, mu):
        # CDF expressed through the standard normal CDF (_norm_cdf).
        trm1 = 1.0/mu - x
        trm2 = 1.0/mu + x
        isqx = 1.0/np.sqrt(x)
        return 1.0-_norm_cdf(isqx*trm1)-np.exp(2.0/mu)*_norm_cdf(-isqx*trm2)
    def _rvs(self, mu):
        # Sample an inverse Gaussian (Wald) variate and take its reciprocal.
        return 1.0/self._random_state.wald(mu, 1.0, size=self._size)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
class semicircular_gen(rv_continuous):
    """A semicircular continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `semicircular` is::
        semicircular.pdf(x) = 2/pi * sqrt(1-x**2)
    for ``-1 <= x <= 1``.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x):
        return 2.0/np.pi*np.sqrt(1-x*x)
    def _cdf(self, x):
        return 0.5+1.0/np.pi*(x*np.sqrt(1-x*x) + np.arcsin(x))
    def _stats(self):
        # mean, variance, skewness, excess kurtosis
        return 0, 0.25, 0, -1.0
    def _entropy(self):
        # Precomputed constant: log(pi) - 1/2.
        return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
class skew_norm_gen(rv_continuous):
    """A skew-normal random variable.
    %(before_notes)s
    Notes
    -----
    The pdf is::
        skewnorm.pdf(x, a) = 2*norm.pdf(x)*norm.cdf(ax)
    `skewnorm` takes ``a`` as a skewness parameter
    When a=0 the distribution is identical to a normal distribution.
    rvs implements the method of [1]_.
    %(after_notes)s
    %(example)s
    References
    ----------
    .. [1] A. Azzalini and A. Capitanio (1999). Statistical applications of the
        multivariate skew-normal distribution. J. Roy. Statist. Soc., B 61, 579-602.
        http://azzalini.stat.unipd.it/SN/faq-r.html
    """
    def _argcheck(self, a):
        # Any finite skewness parameter (positive, negative or zero) is valid.
        return np.isfinite(a)

    def _pdf(self, x, a):
        # 2 * phi(x) * Phi(a*x), with phi/Phi the standard normal pdf/cdf.
        return 2.*_norm_pdf(x)*_norm_cdf(a*x)

    def _rvs(self, a):
        # Azzalini & Capitanio sampling: build a pair of correlated standard
        # normals and reflect the second according to the sign of the first.
        g0 = self._random_state.normal(size=self._size)
        g1 = self._random_state.normal(size=self._size)
        d = a/np.sqrt(1 + a**2)
        mixed = d*g0 + g1*np.sqrt(1 - d**2)
        return np.where(g0 >= 0, mixed, -mixed)

    def _stats(self, a, moments='mvsk'):
        # Closed-form moments; only the requested entries are filled in.
        out = [None, None, None, None]
        const = np.sqrt(2/np.pi) * a/np.sqrt(1 + a**2)
        if 'm' in moments:
            out[0] = const
        if 'v' in moments:
            out[1] = 1 - const**2
        if 's' in moments:
            out[2] = ((4 - np.pi)/2) * (const/np.sqrt(1 - const**2))**3
        if 'k' in moments:
            out[3] = (2*(np.pi - 3)) * (const**4/(1 - const**2)**2)
        return out
skewnorm = skew_norm_gen(name='skewnorm')
class trapz_gen(rv_continuous):
    """A trapezoidal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The trapezoidal distribution can be represented with an up-sloping line
    from ``loc`` to ``(loc + c*scale)``, then constant to ``(loc + d*scale)``
    and then downsloping from ``(loc + d*scale)`` to ``(loc+scale)``.
    `trapz` takes ``c`` and ``d`` as shape parameters.
    %(after_notes)s
    The standard form is in the range [0, 1] with c the mode.
    The location parameter shifts the start to `loc`.
    The scale parameter changes the width from 1 to `scale`.
    %(example)s
    """
    def _argcheck(self, c, d):
        # 0 <= c <= d <= 1: c is where the plateau starts, d where it ends.
        return (c >= 0) & (c <= 1) & (d >= 0) & (d <= 1) & (d >= c)
    def _pdf(self, x, c, d):
        # u is the plateau height, chosen so the density integrates to 1.
        u = 2 / (d - c + 1)
        condlist = [x < c, x <= d, x > d]
        choicelist = [u * x / c, u, u * (1 - x) / (1 - d)]
        return np.select(condlist, choicelist)
    def _cdf(self, x, c, d):
        # Piecewise quadratic / linear / quadratic CDF matching _pdf.
        condlist = [x < c, x <= d, x > d]
        choicelist = [x**2 / c / (d - c + 1),
                      (c + 2 * (x - c)) / (d - c + 1),
                      1 - ((1 - x)**2 / (d - c + 1) / (1 - d))]
        return np.select(condlist, choicelist)
    def _ppf(self, q, c, d):
        # Invert each CDF piece; the piece is chosen by comparing q with
        # the CDF values at the knots c and d.
        qc, qd = self._cdf(c, c, d), self._cdf(d, c, d)
        condlist = [q < qc, q <= qd, q > qd]
        choicelist = [np.sqrt(q * c * (1 + d - c)),
                      0.5 * q * (1 + d - c) + 0.5 * c,
                      1 - np.sqrt((1 - q) * (d - c + 1) * (1 - d))]
        return np.select(condlist, choicelist)
trapz = trapz_gen(a=0.0, b=1.0, name="trapz")
class triang_gen(rv_continuous):
    """A triangular continuous random variable.
    %(before_notes)s
    Notes
    -----
    The triangular distribution can be represented with an up-sloping line from
    ``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)``
    to ``(loc+scale)``.
    `triang` takes ``c`` as a shape parameter.
    %(after_notes)s
    The standard form is in the range [0, 1] with c the mode.
    The location parameter shifts the start to `loc`.
    The scale parameter changes the width from 1 to `scale`.
    %(example)s
    """
    def _rvs(self, c):
        return self._random_state.triangular(0, c, 1, self._size)
    def _argcheck(self, c):
        # The mode may sit at either endpoint (c == 0 or c == 1).
        return (c >= 0) & (c <= 1)
    def _pdf(self, x, c):
        # np.where evaluates both branches, so for the degenerate modes
        # c == 0 (x/c) and c == 1 ((1-x)/(1-c)) the unselected branch
        # divides by zero; suppress the warnings and patch the one point
        # that is genuinely wrong: at c == 1, x == 1 the selected branch
        # is 0/0 = nan, while the correct density is 2*x = 2.
        with np.errstate(divide='ignore', invalid='ignore'):
            r = np.where(x < c, 2*x/c, 2*(1-x)/(1-c))
        return np.where((c == 1) & (x == 1), 2.0, r)
    def _cdf(self, x, c):
        # Same degenerate-mode handling as _pdf: at c == 1, x == 1 the
        # selected branch is 0/0 = nan, while the correct CDF value is 1.
        with np.errstate(divide='ignore', invalid='ignore'):
            r = np.where(x < c, x*x/c, (x*x-2*x+c)/(c-1))
        return np.where((c == 1) & (x == 1), 1.0, r)
    def _ppf(self, q, c):
        # Piecewise inverse of the CDF; well-defined for c in [0, 1].
        return np.where(q < c, np.sqrt(c*q), 1-np.sqrt((1-c)*(1-q)))
    def _stats(self, c):
        # Closed-form mean, variance, skewness and excess kurtosis.
        return ((c+1.0)/3.0,
                (1.0-c+c*c)/18,
                np.sqrt(2)*(2*c-1)*(c+1)*(c-2) / (5*np.power((1.0-c+c*c), 1.5)),
                -3.0/5.0)
    def _entropy(self, c):
        # Entropy of the standard triangular is 1/2 - log(2), independent of c.
        return 0.5-np.log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang")
class truncexpon_gen(rv_continuous):
    """A truncated exponential continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `truncexpon` is::
        truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
    for ``0 < x < b``.
    `truncexpon` takes ``b`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, b):
        # NOTE(review): the upper support endpoint is cached on self as a
        # side effect of argument checking (stateful; assumes scalar b).
        self.b = b
        return b > 0
    def _pdf(self, x, b):
        # -expm1(-b) == 1 - exp(-b), accurate for small b.
        return np.exp(-x)/(-sc.expm1(-b))
    def _logpdf(self, x, b):
        return -x - np.log(-sc.expm1(-b))
    def _cdf(self, x, b):
        return sc.expm1(-x)/sc.expm1(-b)
    def _ppf(self, q, b):
        return -sc.log1p(q*sc.expm1(-b))
    def _munp(self, n, b):
        # Closed forms for the first two raw moments; higher moments fall
        # back to the generic numerical integration helper.
        if n == 1:
            return (1-(b+1)*np.exp(-b))/(-sc.expm1(-b))
        elif n == 2:
            return 2*(1-0.5*(b*b+2*b+2)*np.exp(-b))/(-sc.expm1(-b))
        else:
            # return generic for higher moments
            return self._mom1_sc(n, b)
    def _entropy(self, b):
        eB = np.exp(b)
        return np.log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon')
class truncnorm_gen(rv_continuous):
    """A truncated normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The standard form of this distribution is a standard normal truncated to
    the range [a, b] --- notice that a and b are defined over the domain of the
    standard normal.  To convert clip values for a specific mean and standard
    deviation, use::
        a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
    `truncnorm` takes ``a`` and ``b`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, a, b):
        # NOTE(review): the support and several normalisation constants are
        # cached on self as a side effect of argument checking (stateful).
        self.a = a
        self.b = b
        self._nb = _norm_cdf(b)
        self._na = _norm_cdf(a)
        self._sb = _norm_sf(b)
        self._sa = _norm_sf(a)
        # When both limits are in the right tail (a > 0), compute the
        # normalising mass from survival functions to avoid catastrophic
        # cancellation between two CDF values close to 1.
        self._delta = np.where(self.a > 0,
                               -(self._sb - self._sa),
                               self._nb - self._na)
        self._logdelta = np.log(self._delta)
        return a != b
    def _pdf(self, x, a, b):
        return _norm_pdf(x) / self._delta
    def _logpdf(self, x, a, b):
        return _norm_logpdf(x) - self._logdelta
    def _cdf(self, x, a, b):
        return (_norm_cdf(x) - self._na) / self._delta
    def _ppf(self, q, a, b):
        # XXX Use _lazywhere...
        # Same tail-aware split as in _argcheck: invert via the survival
        # function when the support lies in the right tail.
        ppf = np.where(self.a > 0,
                       _norm_isf(q*self._sb + self._sa*(1.0-q)),
                       _norm_ppf(q*self._nb + self._na*(1.0-q)))
        return ppf
    def _stats(self, a, b):
        # Mean and variance of the truncated standard normal; skewness and
        # kurtosis are not provided.
        nA, nB = self._na, self._nb
        d = nB - nA
        pA, pB = _norm_pdf(a), _norm_pdf(b)
        mu = (pA - pB) / d   # correction sign
        mu2 = 1 + (a*pA - b*pB) / d - mu*mu
        return mu, mu2, None, None
truncnorm = truncnorm_gen(name='truncnorm')
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
    """A Tukey-Lambda continuous random variable.
    %(before_notes)s
    Notes
    -----
    A flexible distribution, able to represent and interpolate between the
    following distributions:
    - Cauchy (lam=-1)
    - logistic (lam=0.0)
    - approx Normal (lam=0.14)
    - u-shape (lam = 0.5)
    - uniform from -1 to 1 (lam = 1)
    `tukeylambda` takes ``lam`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, lam):
        # Every real lam is valid; return an all-True mask of matching shape.
        return np.ones(np.shape(lam), dtype=bool)
    def _pdf(self, x, lam):
        # pdf(x) = 1 / (F(x)**(lam-1) + (1-F(x))**(lam-1)) with F = CDF;
        # zero outside the finite support |x| < 1/lam when lam > 0.
        Fx = np.asarray(sc.tklmbda(x, lam))
        Px = Fx**(lam-1.0) + (np.asarray(1-Fx))**(lam-1.0)
        Px = 1.0/np.asarray(Px)
        return np.where((lam <= 0) | (abs(x) < 1.0/np.asarray(lam)), Px, 0.0)
    def _cdf(self, x, lam):
        return sc.tklmbda(x, lam)
    def _ppf(self, q, lam):
        # Quantile function (q**lam - (1-q)**lam)/lam, written with
        # boxcox/boxcox1p to stay accurate near lam = 0.
        return sc.boxcox(q, lam) - sc.boxcox1p(-q, lam)
    def _stats(self, lam):
        return 0, _tlvar(lam), 0, _tlkurt(lam)
    def _entropy(self, lam):
        # Differential entropy by numerical quadrature over the quantile scale.
        def integ(p):
            return np.log(pow(p, lam-1)+pow(1-p, lam-1))
        return integrate.quad(integ, 0, 1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda')
class uniform_gen(rv_continuous):
    """A uniform continuous random variable.
    This distribution is constant between `loc` and ``loc + scale``.
    %(before_notes)s
    %(example)s
    """
    def _rvs(self):
        return self._random_state.uniform(0.0, 1.0, self._size)
    def _pdf(self, x):
        # Constant density 1 on the support; (x == x) is False for NaN
        # inputs, so those map to 0.0 instead of propagating.
        return 1.0*(x == x)
    def _cdf(self, x):
        return x
    def _ppf(self, q):
        return q
    def _stats(self):
        # mean, variance, skewness, excess kurtosis
        return 0.5, 1.0/12, 0, -1.2
    def _entropy(self):
        return 0.0
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
class vonmises_gen(rv_continuous):
    """A Von Mises continuous random variable.
    %(before_notes)s
    Notes
    -----
    If `x` is not in range or `loc` is not in range it assumes they are angles
    and converts them to [-pi, pi] equivalents.
    The probability density function for `vonmises` is::
        vonmises.pdf(x, kappa) = exp(kappa * cos(x)) / (2*pi*I[0](kappa))
    for ``-pi <= x <= pi``, ``kappa > 0``.
    `vonmises` takes ``kappa`` as a shape parameter.
    %(after_notes)s
    See Also
    --------
    vonmises_line : The same distribution, defined on a [-pi, pi] segment
                    of the real line.
    %(example)s
    """
    def _rvs(self, kappa):
        return self._random_state.vonmises(0.0, kappa, size=self._size)
    def _pdf(self, x, kappa):
        return np.exp(kappa * np.cos(x)) / (2*np.pi*sc.i0(kappa))
    def _cdf(self, x, kappa):
        # Delegates to the compiled helper in scipy.stats._stats.
        return _stats.von_mises_cdf(kappa, x)
    def _stats_skip(self, kappa):
        # Deliberately named _stats_skip (not _stats) so the generic
        # machinery does not use it.
        return 0, None, 0, None
    def _entropy(self, kappa):
        return (-kappa * sc.i1(kappa) / sc.i0(kappa) +
                np.log(2 * np.pi * sc.i0(kappa)))
vonmises = vonmises_gen(name='vonmises')
vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
class wald_gen(invgauss_gen):
    """A Wald continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `wald` is::
        wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
    for ``x > 0``.
    `wald` is a special case of `invgauss` with ``mu == 1``.
    %(after_notes)s
    %(example)s
    """
    # Density vanishes at x = 0, so treat the support as an open interval.
    _support_mask = rv_continuous._open_support_mask

    def _pdf(self, x):
        # Delegate to invgauss with its shape parameter pinned at mu = 1.
        return invgauss._pdf(x, 1.0)

    def _logpdf(self, x):
        return invgauss._logpdf(x, 1.0)

    def _cdf(self, x):
        return invgauss._cdf(x, 1.0)

    def _rvs(self):
        # numpy provides a Wald sampler directly (mean = 1, scale = 1).
        return self._random_state.wald(1.0, 1.0, size=self._size)

    def _stats(self):
        mean, variance = 1.0, 1.0
        skewness, kurtosis = 3.0, 15.0
        return mean, variance, skewness, kurtosis
wald = wald_gen(a=0.0, name="wald")
class wrapcauchy_gen(rv_continuous):
    """A wrapped Cauchy continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `wrapcauchy` is::
        wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
    for ``0 <= x <= 2*pi``, ``0 < c < 1``.
    `wrapcauchy` takes ``c`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, c):
        return (c > 0) & (c < 1)
    def _pdf(self, x, c):
        return (1.0-c*c)/(2*np.pi*(1+c*c-2*c*np.cos(x)))
    def _cdf(self, x, c):
        # Split the support at pi: for x < pi the arctan formula is used
        # directly, for x >= pi the symmetry F(x) = 1 - F(2*pi - x) is
        # applied.  Assumes x is an ndarray (uses x.shape/x.dtype).
        output = np.zeros(x.shape, dtype=x.dtype)
        val = (1.0+c)/(1.0-c)
        c1 = x < np.pi
        c2 = 1-c1  # complement mask of c1
        xp = np.extract(c1, x)
        xn = np.extract(c2, x)
        if np.any(xn):
            valn = np.extract(c2, np.ones_like(x)*val)
            # Reflect the upper half onto [0, pi).
            xn = 2*np.pi - xn
            yn = np.tan(xn/2.0)
            on = 1.0-1.0/np.pi*np.arctan(valn*yn)
            np.place(output, c2, on)
        if np.any(xp):
            valp = np.extract(c1, np.ones_like(x)*val)
            yp = np.tan(xp/2.0)
            op = 1.0/np.pi*np.arctan(valp*yp)
            np.place(output, c1, op)
        return output
    def _ppf(self, q, c):
        # Inverse CDF, split at q = 1/2 to mirror the split in _cdf.
        val = (1.0-c)/(1.0+c)
        rcq = 2*np.arctan(val*np.tan(np.pi*q))
        rcmq = 2*np.pi-2*np.arctan(val*np.tan(np.pi*(1-q)))
        return np.where(q < 1.0/2, rcq, rcmq)
    def _entropy(self, c):
        return np.log(2*np.pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*np.pi, name='wrapcauchy')
class gennorm_gen(rv_continuous):
    """A generalized normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `gennorm` is [1]_::
                                        beta
        gennorm.pdf(x, beta) =  ---------------  exp(-|x|**beta)
                                2 gamma(1/beta)
    `gennorm` takes ``beta`` as a shape parameter.
    For ``beta = 1``, it is identical to a Laplace distribution.
    For ``beta = 2``, it is identical to a normal distribution
    (with ``scale=1/sqrt(2)``).
    See Also
    --------
    laplace : Laplace distribution
    norm : normal distribution
    References
    ----------
    .. [1] "Generalized normal distribution, Version 1",
           https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
    %(example)s
    """
    def _pdf(self, x, beta):
        return np.exp(self._logpdf(x, beta))
    def _logpdf(self, x, beta):
        return np.log(0.5*beta) - sc.gammaln(1.0/beta) - abs(x)**beta
    def _cdf(self, x, beta):
        c = 0.5 * np.sign(x)
        # evaluating (.5 + c) first prevents numerical cancellation
        return (0.5 + c) - c * sc.gammaincc(1.0/beta, abs(x)**beta)
    def _ppf(self, x, beta):
        c = np.sign(x - 0.5)
        # evaluating (1. + c) first prevents numerical cancellation
        return c * sc.gammainccinv(1.0/beta, (1.0 + c) - 2.0*c*x)**(1.0/beta)
    def _sf(self, x, beta):
        # The distribution is symmetric about 0.
        return self._cdf(-x, beta)
    def _isf(self, x, beta):
        return -self._ppf(x, beta)
    def _stats(self, beta):
        # Variance = gamma(3/beta)/gamma(1/beta); kurtosis from gamma(5/beta).
        # Working with log-gammas avoids overflow for small beta.
        c1, c3, c5 = sc.gammaln([1.0/beta, 3.0/beta, 5.0/beta])
        return 0., np.exp(c3 - c1), 0., np.exp(c5 + c1 - 2.0*c3) - 3.
    def _entropy(self, beta):
        return 1. / beta - np.log(.5 * beta) + sc.gammaln(1. / beta)
gennorm = gennorm_gen(name='gennorm')
class halfgennorm_gen(rv_continuous):
    """The upper half of a generalized normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `halfgennorm` is::
                                        beta
        halfgennorm.pdf(x, beta) =  -------------  exp(-|x|**beta)
                                    gamma(1/beta)
    `gennorm` takes ``beta`` as a shape parameter.
    For ``beta = 1``, it is identical to an exponential distribution.
    For ``beta = 2``, it is identical to a half normal distribution
    (with ``scale=1/sqrt(2)``).
    See Also
    --------
    gennorm : generalized normal distribution
    expon : exponential distribution
    halfnorm : half normal distribution
    References
    ----------
    .. [1] "Generalized normal distribution, Version 1",
           https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
    %(example)s
    """
    def _pdf(self, x, beta):
        return np.exp(self._logpdf(x, beta))
    def _logpdf(self, x, beta):
        # Twice the density of gennorm on x >= 0: log(beta) instead of
        # log(beta/2).
        return np.log(beta) - sc.gammaln(1.0/beta) - x**beta
    def _cdf(self, x, beta):
        # Regularized lower incomplete gamma function.
        return sc.gammainc(1.0/beta, x**beta)
    def _ppf(self, x, beta):
        return sc.gammaincinv(1.0/beta, x)**(1.0/beta)
    def _sf(self, x, beta):
        return sc.gammaincc(1.0/beta, x**beta)
    def _isf(self, x, beta):
        return sc.gammainccinv(1.0/beta, x)**(1.0/beta)
    def _entropy(self, beta):
        return 1.0/beta - np.log(beta) + sc.gammaln(1.0/beta)
halfgennorm = halfgennorm_gen(a=0, name='halfgennorm')
def _argus_phi(chi):
    """
    Utility function for the argus distribution used in the CDF and
    normalization of the Argus function:
        Psi(chi) = Phi(chi) - chi*phi(chi) - 1/2
    with Phi and phi the standard normal CDF and PDF.
    """
    return _norm_cdf(chi) - chi * _norm_pdf(chi) - 0.5
class argus_gen(rv_continuous):
    """
    Argus distribution
    %(before_notes)s
    Notes
    -----
    The probability density function for `argus` is::
        argus.pdf(x, chi) = chi**3 / (sqrt(2*pi) * Psi(chi)) * x * sqrt(1-x**2) * exp(- 0.5 * chi**2 * (1 - x**2))
    where:
        Psi(chi) = Phi(chi) - chi * phi(chi) - 1/2
    with Phi and phi being the CDF and PDF of a standard normal distribution, respectively.
    `argus` takes ``chi`` as a shape parameter.
    References
    ----------
    .. [1] "ARGUS distribution",
           https://en.wikipedia.org/wiki/ARGUS_distribution
    %(after_notes)s
    .. versionadded:: 0.19.0
    %(example)s
    """
    def _pdf(self, x, chi):
        """
        Return PDF of the argus function.
        _norm_pdf_C is sqrt(2*pi), the normal pdf normalization constant.
        """
        y = 1.0 - x**2
        return chi**3 / (_norm_pdf_C * _argus_phi(chi)) * x * np.sqrt(y) * np.exp(-chi**2 * y / 2)
    def _cdf(self, x, chi):
        """
        Return CDF of the argus function, via the survival function.
        """
        return 1.0 - self._sf(x, chi)
    def _sf(self, x, chi):
        """
        Return survival function of the argus function:
        SF(x) = Psi(chi*sqrt(1-x**2)) / Psi(chi).
        """
        return _argus_phi(chi * np.sqrt(1 - x**2)) / _argus_phi(chi)
argus = argus_gen(name='argus', longname="An Argus Function", a=0.0, b=1.0)
class rv_histogram(rv_continuous):
    """
    Generates a distribution given by a histogram.
    This is useful to generate a template distribution from a binned
    datasample.
    As a subclass of the `rv_continuous` class, `rv_histogram` inherits from it
    a collection of generic methods (see `rv_continuous` for the full list),
    and implements them based on the properties of the provided binned
    datasample.
    Parameters
    ----------
    histogram : tuple of array_like
      Tuple containing two array_like objects
      The first containing the content of n bins
      The second containing the (n+1) bin boundaries
      In particular the return value np.histogram is accepted
    Notes
    -----
    There are no additional shape parameters except for the loc and scale.
    The pdf is defined as a stepwise function from the provided histogram
    The cdf is a linear interpolation of the pdf.
    .. versionadded:: 0.19.0
    Examples
    --------
    Create a scipy.stats distribution from a numpy histogram
    >>> import scipy.stats
    >>> import numpy as np
    >>> data = scipy.stats.norm.rvs(size=100000, loc=0, scale=1.5, random_state=123)
    >>> hist = np.histogram(data, bins=100)
    >>> hist_dist = scipy.stats.rv_histogram(hist)
    Behaves like an ordinary scipy rv_continuous distribution
    >>> hist_dist.pdf(1.0)
    0.20538577847618705
    >>> hist_dist.cdf(2.0)
    0.90818568543056499
    PDF is zero above (below) the highest (lowest) bin of the histogram,
    defined by the max (min) of the original dataset
    >>> hist_dist.pdf(np.max(data))
    0.0
    >>> hist_dist.cdf(np.max(data))
    1.0
    >>> hist_dist.pdf(np.min(data))
    7.7591907244498314e-05
    >>> hist_dist.cdf(np.min(data))
    0.0
    PDF and CDF follow the histogram
    >>> import matplotlib.pyplot as plt
    >>> X = np.linspace(-5.0, 5.0, 100)
    >>> plt.title("PDF from Template")
    >>> plt.hist(data, normed=True, bins=100)
    >>> plt.plot(X, hist_dist.pdf(X), label='PDF')
    >>> plt.plot(X, hist_dist.cdf(X), label='CDF')
    >>> plt.show()
    """
    _support_mask = rv_continuous._support_mask
    def __init__(self, histogram, *args, **kwargs):
        """
        Create a new distribution using the given histogram
        Parameters
        ----------
        histogram : tuple of array_like
          Tuple containing two array_like objects
          The first containing the content of n bins
          The second containing the (n+1) bin boundaries
          In particular the return value np.histogram is accepted
        Raises
        ------
        ValueError
            If `histogram` does not have length 2, or if the bin counts and
            bin boundaries have inconsistent lengths.
        """
        self._histogram = histogram
        if len(histogram) != 2:
            raise ValueError("Expected length 2 for parameter histogram")
        self._hpdf = np.asarray(histogram[0])
        self._hbins = np.asarray(histogram[1])
        if len(self._hpdf) + 1 != len(self._hbins):
            raise ValueError("Number of elements in histogram content "
                             "and histogram boundaries do not match, "
                             "expected n and n+1.")
        self._hbin_widths = self._hbins[1:] - self._hbins[:-1]
        # Normalise the bin contents so the piecewise-constant pdf
        # integrates to 1.
        self._hpdf = self._hpdf / float(np.sum(self._hpdf * self._hbin_widths))
        self._hcdf = np.cumsum(self._hpdf * self._hbin_widths)
        # Pad the pdf with zero bins on both sides so out-of-range lookups
        # in _pdf return 0 (see the searchsorted indexing there).
        self._hpdf = np.hstack([0.0, self._hpdf, 0.0])
        self._hcdf = np.hstack([0.0, self._hcdf])
        # Set support
        kwargs['a'] = self._hbins[0]
        kwargs['b'] = self._hbins[-1]
        super(rv_histogram, self).__init__(*args, **kwargs)
    def _pdf(self, x):
        """
        PDF of the histogram
        """
        # side='right' maps x below the first edge to index 0 and x at or
        # above the last edge to the trailing zero pad.
        return self._hpdf[np.searchsorted(self._hbins, x, side='right')]
    def _cdf(self, x):
        """
        CDF calculated from the histogram
        """
        return np.interp(x, self._hbins, self._hcdf)
    def _ppf(self, x):
        """
        Percentile function calculated from the histogram
        """
        # Inverse of _cdf: interpolate with the roles of x and y swapped.
        return np.interp(x, self._hcdf, self._hbins)
    def _munp(self, n):
        """Compute the n-th non-central moment."""
        # Analytic integral of x**n against the piecewise-constant pdf.
        integrals = (self._hbins[1:]**(n+1) - self._hbins[:-1]**(n+1)) / (n+1)
        return np.sum(self._hpdf[1:-1] * integrals)
    def _entropy(self):
        """Compute entropy of distribution"""
        # _lazywhere avoids evaluating log(0) for empty bins.
        res = _lazywhere(self._hpdf[1:-1] > 0.0,
                         (self._hpdf[1:-1],),
                         np.log,
                         0.0)
        return -np.sum(self._hpdf[1:-1] * res * self._hbin_widths)
    def _updated_ctor_param(self):
        """
        Set the histogram as additional constructor argument
        """
        dct = super(rv_histogram, self)._updated_ctor_param()
        dct['histogram'] = self._histogram
        return dct
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)
# Export every frozen distribution instance and generator class, plus
# rv_histogram (which has no pre-made instance).
__all__ = _distn_names + _distn_gen_names + ['rv_histogram']
| bsd-3-clause |
Cuuuurzel/KiPyCalc | sympy/functions/special/error_functions.py | 15 | 66870 | """ This module contains various functions that are special cases
of incomplete gamma functions. It should probably be renamed. """
from __future__ import print_function, division
from sympy.core import Add, S, C, sympify, cacheit, pi, I
from sympy.core.function import Function, ArgumentIndexError
from sympy.functions.elementary.miscellaneous import sqrt, root
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.elementary.complexes import polar_lift
from sympy.functions.special.hyper import hyper, meijerg
from sympy.core.compatibility import xrange
# TODO series expansions
# TODO see the "Note:" in Ei
###############################################################################
################################ ERROR FUNCTION ###############################
###############################################################################
class erf(Function):
    r"""
    The Gauss error function. This function is defined as:
    .. math ::
        \mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_0^x e^{-t^2} \mathrm{d}t.
    Examples
    ========
    >>> from sympy import I, oo, erf
    >>> from sympy.abc import z
    Several special values are known:
    >>> erf(0)
    0
    >>> erf(oo)
    1
    >>> erf(-oo)
    -1
    >>> erf(I*oo)
    oo*I
    >>> erf(-I*oo)
    -oo*I
    In general one can pull out factors of -1 and I from the argument:
    >>> erf(-z)
    -erf(z)
    The error function obeys the mirror symmetry:
    >>> from sympy import conjugate
    >>> conjugate(erf(z))
    erf(conjugate(z))
    Differentiation with respect to z is supported:
    >>> from sympy import diff
    >>> diff(erf(z), z)
    2*exp(-z**2)/sqrt(pi)
    We can numerically evaluate the error function to arbitrary precision
    on the whole complex plane:
    >>> erf(4).evalf(30)
    0.999999984582742099719981147840
    >>> erf(-4*I).evalf(30)
    -1296959.73071763923152794095062*I
    See Also
    ========
    erfc: Complementary error function.
    erfi: Imaginary error function.
    erf2: Two-argument error function.
    erfinv: Inverse error function.
    erfcinv: Inverse Complementary error function.
    erf2inv: Inverse two-argument error function.
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Error_function
    .. [2] http://dlmf.nist.gov/7
    .. [3] http://mathworld.wolfram.com/Erf.html
    .. [4] http://functions.wolfram.com/GammaBetaErf/Erf
    """
    # erf is an entire function, hence single-valued everywhere.
    unbranched = True
    def fdiff(self, argindex=1):
        # d/dz erf(z) = 2*exp(-z**2)/sqrt(pi)
        if argindex == 1:
            return 2*C.exp(-self.args[0]**2)/sqrt(S.Pi)
        else:
            raise ArgumentIndexError(self, argindex)
    def inverse(self, argindex=1):
        """
        Returns the inverse of this function.
        """
        return erfinv
    @classmethod
    def eval(cls, arg):
        # Special numeric values.
        if arg.is_Number:
            if arg is S.NaN:
                return S.NaN
            elif arg is S.Infinity:
                return S.One
            elif arg is S.NegativeInfinity:
                return S.NegativeOne
            elif arg is S.Zero:
                return S.Zero
        # Collapse compositions with the inverse functions.
        if arg.func is erfinv:
             return arg.args[0]
        if arg.func is erfcinv:
            return S.One - arg.args[0]
        if arg.func is erf2inv and arg.args[0] is S.Zero:
            return arg.args[1]
        # Try to pull out factors of I
        t = arg.extract_multiplicatively(S.ImaginaryUnit)
        if t is S.Infinity or t is S.NegativeInfinity:
            return arg
        # Try to pull out factors of -1
        if arg.could_extract_minus_sign():
            return -cls(-arg)
    @staticmethod
    @cacheit
    def taylor_term(n, x, *previous_terms):
        # Maclaurin series: erf(x) = 2/sqrt(pi) * sum_k (-1)**k x**(2k+1)
        # / (k! (2k+1)); only odd powers of x appear.
        if n < 0 or n % 2 == 0:
            return S.Zero
        else:
            x = sympify(x)
            k = C.floor((n - 1)/S(2))
            if len(previous_terms) > 2:
                # Recurrence from the ratio of consecutive odd-order terms.
                return -previous_terms[-2] * x**2 * (n - 2)/(n*k)
            else:
                return 2*(-1)**k * x**n/(n*C.factorial(k)*sqrt(S.Pi))
    def _eval_conjugate(self):
        # erf commutes with conjugation (mirror symmetry).
        return self.func(self.args[0].conjugate())
    def _eval_is_real(self):
        return self.args[0].is_real
    def _eval_rewrite_as_uppergamma(self, z):
        return sqrt(z**2)/z*(S.One - C.uppergamma(S.Half, z**2)/sqrt(S.Pi))
    def _eval_rewrite_as_fresnels(self, z):
        arg = (S.One - S.ImaginaryUnit)*z/sqrt(pi)
        return (S.One + S.ImaginaryUnit)*(fresnelc(arg) - I*fresnels(arg))
    def _eval_rewrite_as_fresnelc(self, z):
        arg = (S.One - S.ImaginaryUnit)*z/sqrt(pi)
        return (S.One + S.ImaginaryUnit)*(fresnelc(arg) - I*fresnels(arg))
    def _eval_rewrite_as_meijerg(self, z):
        return z/sqrt(pi)*meijerg([S.Half], [], [0], [-S.Half], z**2)
    def _eval_rewrite_as_hyper(self, z):
        return 2*z/sqrt(pi)*hyper([S.Half], [3*S.Half], -z**2)
    def _eval_rewrite_as_expint(self, z):
        return sqrt(z**2)/z - z*expint(S.Half, z**2)/sqrt(S.Pi)
    def _eval_rewrite_as_tractable(self, z):
        # Form suited to asymptotic expansion (uses the helper _erfs).
        return S.One - _erfs(z)*C.exp(-z**2)
    def _eval_rewrite_as_erfc(self, z):
        return S.One - erfc(z)
    def _eval_rewrite_as_erfi(self, z):
        return -I*erfi(I*z)
    def _eval_as_leading_term(self, x):
        # Near the origin erf(z) ~ 2*z/sqrt(pi).
        arg = self.args[0].as_leading_term(x)
        if x in arg.free_symbols and C.Order(1, x).contains(arg):
            return 2*x/sqrt(pi)
        else:
            return self.func(arg)
    def as_real_imag(self, deep=True, **hints):
        # Real arguments give a real result; otherwise split erf(x + I*y)
        # symbolically using sqrt(-y**2/x**2) (= +-I*y/x) combinations.
        if self.args[0].is_real:
            if deep:
                hints['complex'] = False
                return (self.expand(deep, **hints), S.Zero)
            else:
                return (self, S.Zero)
        if deep:
            x, y = self.args[0].expand(deep, **hints).as_real_imag()
        else:
            x, y = self.args[0].as_real_imag()
        sq = -y**2/x**2
        re = S.Half*(self.func(x + x*sqrt(sq)) + self.func(x - x*sqrt(sq)))
        im = x/(2*y) * sqrt(sq) * (self.func(x - x*sqrt(sq)) -
                                   self.func(x + x*sqrt(sq)))
        return (re, im)
class erfc(Function):
    r"""
    Complementary Error Function. The function is defined as:
    .. math ::
        \mathrm{erfc}(x) = \frac{2}{\sqrt{\pi}} \int_x^\infty e^{-t^2} \mathrm{d}t
    Examples
    ========
    >>> from sympy import I, oo, erfc
    >>> from sympy.abc import z
    Several special values are known:
    >>> erfc(0)
    1
    >>> erfc(oo)
    0
    >>> erfc(-oo)
    2
    >>> erfc(I*oo)
    -oo*I
    >>> erfc(-I*oo)
    oo*I
    The error function obeys the mirror symmetry:
    >>> from sympy import conjugate
    >>> conjugate(erfc(z))
    erfc(conjugate(z))
    Differentiation with respect to z is supported:
    >>> from sympy import diff
    >>> diff(erfc(z), z)
    -2*exp(-z**2)/sqrt(pi)
    It also follows
    >>> erfc(-z)
    -erfc(z) + 2
    We can numerically evaluate the complementary error function to arbitrary precision
    on the whole complex plane:
    >>> erfc(4).evalf(30)
    0.0000000154172579002800188521596734869
    >>> erfc(4*I).evalf(30)
    1.0 - 1296959.73071763923152794095062*I
    See Also
    ========
    erf: Gaussian error function.
    erfi: Imaginary error function.
    erf2: Two-argument error function.
    erfinv: Inverse error function.
    erfcinv: Inverse Complementary error function.
    erf2inv: Inverse two-argument error function.
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Error_function
    .. [2] http://dlmf.nist.gov/7
    .. [3] http://mathworld.wolfram.com/Erfc.html
    .. [4] http://functions.wolfram.com/GammaBetaErf/Erfc
    """
    # erfc is an entire function, hence single-valued everywhere.
    unbranched = True
    def fdiff(self, argindex=1):
        # d/dz erfc(z) = -2*exp(-z**2)/sqrt(pi)
        if argindex == 1:
            return -2*C.exp(-self.args[0]**2)/sqrt(S.Pi)
        else:
            raise ArgumentIndexError(self, argindex)
    def inverse(self, argindex=1):
        """
        Returns the inverse of this function.
        """
        return erfcinv
    @classmethod
    def eval(cls, arg):
        # Special numeric values.
        if arg.is_Number:
            if arg is S.NaN:
                return S.NaN
            elif arg is S.Infinity:
                return S.Zero
            elif arg is S.Zero:
                return S.One
        # Collapse compositions with the inverse functions.
        if arg.func is erfinv:
            return S.One - arg.args[0]
        if arg.func is erfcinv:
            return arg.args[0]
        # Try to pull out factors of I
        t = arg.extract_multiplicatively(S.ImaginaryUnit)
        if t is S.Infinity or t is S.NegativeInfinity:
            return -arg
        # Try to pull out factors of -1
        if arg.could_extract_minus_sign():
            # Reflection formula: erfc(-z) = 2 - erfc(z).
            return S(2) - cls(-arg)
    @staticmethod
    @cacheit
    def taylor_term(n, x, *previous_terms):
        # Series erfc(x) = 1 - erf(x): constant term 1, then the odd-power
        # erf terms with opposite sign.
        if n == 0:
            return S.One
        elif n < 0 or n % 2 == 0:
            return S.Zero
        else:
            x = sympify(x)
            k = C.floor((n - 1)/S(2))
            if len(previous_terms) > 2:
                # Recurrence from the ratio of consecutive odd-order terms.
                return -previous_terms[-2] * x**2 * (n - 2)/(n*k)
            else:
                return -2*(-1)**k * x**n/(n*C.factorial(k)*sqrt(S.Pi))
    def _eval_conjugate(self):
        # erfc commutes with conjugation (mirror symmetry).
        return self.func(self.args[0].conjugate())
    def _eval_is_real(self):
        return self.args[0].is_real
    def _eval_rewrite_as_tractable(self, z):
        # Route through erf, whose tractable form handles the asymptotics.
        return self.rewrite(erf).rewrite("tractable", deep=True)
    def _eval_rewrite_as_erf(self, z):
        return S.One - erf(z)
    def _eval_rewrite_as_erfi(self, z):
        return S.One + I*erfi(I*z)
    def _eval_rewrite_as_fresnels(self, z):
        arg = (S.One - S.ImaginaryUnit)*z/sqrt(pi)
        return S.One - (S.One + S.ImaginaryUnit)*(fresnelc(arg) - I*fresnels(arg))
    def _eval_rewrite_as_fresnelc(self, z):
        arg = (S.One-S.ImaginaryUnit)*z/sqrt(pi)
        return S.One - (S.One + S.ImaginaryUnit)*(fresnelc(arg) - I*fresnels(arg))
    def _eval_rewrite_as_meijerg(self, z):
        return S.One - z/sqrt(pi)*meijerg([S.Half], [], [0], [-S.Half], z**2)
    def _eval_rewrite_as_hyper(self, z):
        return S.One - 2*z/sqrt(pi)*hyper([S.Half], [3*S.Half], -z**2)
    def _eval_rewrite_as_uppergamma(self, z):
        return S.One - sqrt(z**2)/z*(S.One - C.uppergamma(S.Half, z**2)/sqrt(S.Pi))
    def _eval_rewrite_as_expint(self, z):
        return S.One - sqrt(z**2)/z + z*expint(S.Half, z**2)/sqrt(S.Pi)
    def _eval_as_leading_term(self, x):
        # Near the origin erfc(z) ~ 1.
        arg = self.args[0].as_leading_term(x)
        if x in arg.free_symbols and C.Order(1, x).contains(arg):
            return S.One
        else:
            return self.func(arg)
    def as_real_imag(self, deep=True, **hints):
        # Real arguments give a real result; otherwise split erfc(x + I*y)
        # symbolically using sqrt(-y**2/x**2) (= +-I*y/x) combinations.
        if self.args[0].is_real:
            if deep:
                hints['complex'] = False
                return (self.expand(deep, **hints), S.Zero)
            else:
                return (self, S.Zero)
        if deep:
            x, y = self.args[0].expand(deep, **hints).as_real_imag()
        else:
            x, y = self.args[0].as_real_imag()
        sq = -y**2/x**2
        re = S.Half*(self.func(x + x*sqrt(sq)) + self.func(x - x*sqrt(sq)))
        im = x/(2*y) * sqrt(sq) * (self.func(x - x*sqrt(sq)) -
                                   self.func(x + x*sqrt(sq)))
        return (re, im)
class erfi(Function):
    r"""
    Imaginary error function. The function erfi is defined as:
    .. math ::
        \mathrm{erfi}(x) = \frac{2}{\sqrt{\pi}} \int_0^x e^{t^2} \mathrm{d}t
    Examples
    ========
    >>> from sympy import I, oo, erfi
    >>> from sympy.abc import z
    Several special values are known:
    >>> erfi(0)
    0
    >>> erfi(oo)
    oo
    >>> erfi(-oo)
    -oo
    >>> erfi(I*oo)
    I
    >>> erfi(-I*oo)
    -I
    In general one can pull out factors of -1 and I from the argument:
    >>> erfi(-z)
    -erfi(z)
    >>> from sympy import conjugate
    >>> conjugate(erfi(z))
    erfi(conjugate(z))
    Differentiation with respect to z is supported:
    >>> from sympy import diff
    >>> diff(erfi(z), z)
    2*exp(z**2)/sqrt(pi)
    We can numerically evaluate the imaginary error function to arbitrary precision
    on the whole complex plane:
    >>> erfi(2).evalf(30)
    18.5648024145755525987042919132
    >>> erfi(-2*I).evalf(30)
    -0.995322265018952734162069256367*I
    See Also
    ========
    erf: Gaussian error function.
    erfc: Complementary error function.
    erf2: Two-argument error function.
    erfinv: Inverse error function.
    erfcinv: Inverse Complementary error function.
    erf2inv: Inverse two-argument error function.
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Error_function
    .. [2] http://mathworld.wolfram.com/Erfi.html
    .. [3] http://functions.wolfram.com/GammaBetaErf/Erfi
    """
    # erfi is entire, hence single-valued on the whole complex plane.
    unbranched = True
    def fdiff(self, argindex=1):
        # d/dz erfi(z) = 2*exp(z**2)/sqrt(pi)
        if argindex == 1:
            return 2*C.exp(self.args[0]**2)/sqrt(S.Pi)
        else:
            raise ArgumentIndexError(self, argindex)
    @classmethod
    def eval(cls, z):
        """Automatic evaluation: special points, then -1 and I extraction."""
        if z.is_Number:
            if z is S.NaN:
                return S.NaN
            elif z is S.Zero:
                return S.Zero
            elif z is S.Infinity:
                return S.Infinity
        # Try to pull out factors of -1
        if z.could_extract_minus_sign():
            return -cls(-z)
        # Try to pull out factors of I
        nz = z.extract_multiplicatively(I)
        if nz is not None:
            if nz is S.Infinity:
                return I
            # erfi(I*erfinv(x)) == I*x, and analogously for the other
            # inverse error functions.
            if nz.func is erfinv:
                return I*nz.args[0]
            if nz.func is erfcinv:
                return I*(S.One - nz.args[0])
            if nz.func is erf2inv and nz.args[0] is S.Zero:
                return I*nz.args[1]
    @staticmethod
    @cacheit
    def taylor_term(n, x, *previous_terms):
        """Return the nth Taylor-series term about the origin.

        The series contains odd powers only; other terms are zero.
        """
        if n < 0 or n % 2 == 0:
            return S.Zero
        else:
            x = sympify(x)
            k = C.floor((n - 1)/S(2))
            if len(previous_terms) > 2:
                # Recurrence from the previous odd term, when available.
                return previous_terms[-2] * x**2 * (n - 2)/(n*k)
            else:
                return 2 * x**n/(n*C.factorial(k)*sqrt(S.Pi))
    def _eval_conjugate(self):
        # Mirror symmetry: conjugate(erfi(z)) == erfi(conjugate(z)).
        return self.func(self.args[0].conjugate())
    def _eval_is_real(self):
        return self.args[0].is_real
    def _eval_rewrite_as_tractable(self, z):
        return self.rewrite(erf).rewrite("tractable", deep=True)
    def _eval_rewrite_as_erf(self, z):
        return -I*erf(I*z)
    def _eval_rewrite_as_erfc(self, z):
        return I*erfc(I*z) - I
    def _eval_rewrite_as_fresnels(self, z):
        arg = (S.One + S.ImaginaryUnit)*z/sqrt(pi)
        return (S.One - S.ImaginaryUnit)*(fresnelc(arg) - I*fresnels(arg))
    def _eval_rewrite_as_fresnelc(self, z):
        arg = (S.One + S.ImaginaryUnit)*z/sqrt(pi)
        return (S.One - S.ImaginaryUnit)*(fresnelc(arg) - I*fresnels(arg))
    def _eval_rewrite_as_meijerg(self, z):
        return z/sqrt(pi)*meijerg([S.Half], [], [0], [-S.Half], -z**2)
    def _eval_rewrite_as_hyper(self, z):
        return 2*z/sqrt(pi)*hyper([S.Half], [3*S.Half], z**2)
    def _eval_rewrite_as_uppergamma(self, z):
        return sqrt(-z**2)/z*(C.uppergamma(S.Half, -z**2)/sqrt(S.Pi) - S.One)
    def _eval_rewrite_as_expint(self, z):
        return sqrt(-z**2)/z - z*expint(S.Half, -z**2)/sqrt(S.Pi)
    def as_real_imag(self, deep=True, **hints):
        """Return a ``(re, im)`` tuple, with im == 0 for real arguments."""
        if self.args[0].is_real:
            if deep:
                # Expansion of a real argument stays real here.
                hints['complex'] = False
                return (self.expand(deep, **hints), S.Zero)
            else:
                return (self, S.Zero)
        if deep:
            x, y = self.args[0].expand(deep, **hints).as_real_imag()
        else:
            x, y = self.args[0].as_real_imag()
        # sqrt(sq) equals I*y/x up to sign, so x*(1 +- sqrt(sq)) == x +- I*y.
        sq = -y**2/x**2
        re = S.Half*(self.func(x + x*sqrt(sq)) + self.func(x - x*sqrt(sq)))
        im = x/(2*y) * sqrt(sq) * (self.func(x - x*sqrt(sq)) -
                    self.func(x + x*sqrt(sq)))
        return (re, im)
class erf2(Function):
    r"""
    Two-argument error function. This function is defined as:
    .. math ::
        \mathrm{erf2}(x, y) = \frac{2}{\sqrt{\pi}} \int_x^y e^{-t^2} \mathrm{d}t
    Examples
    ========
    >>> from sympy import I, oo, erf2
    >>> from sympy.abc import x, y
    Several special values are known:
    >>> erf2(0, 0)
    0
    >>> erf2(x, x)
    0
    >>> erf2(x, oo)
    -erf(x) + 1
    >>> erf2(x, -oo)
    -erf(x) - 1
    >>> erf2(oo, y)
    erf(y) - 1
    >>> erf2(-oo, y)
    erf(y) + 1
    In general one can pull out factors of -1:
    >>> erf2(-x, -y)
    -erf2(x, y)
    The error function obeys the mirror symmetry:
    >>> from sympy import conjugate
    >>> conjugate(erf2(x, y))
    erf2(conjugate(x), conjugate(y))
    Differentiation with respect to x, y is supported:
    >>> from sympy import diff
    >>> diff(erf2(x, y), x)
    -2*exp(-x**2)/sqrt(pi)
    >>> diff(erf2(x, y), y)
    2*exp(-y**2)/sqrt(pi)
    See Also
    ========
    erf: Gaussian error function.
    erfc: Complementary error function.
    erfi: Imaginary error function.
    erfinv: Inverse error function.
    erfcinv: Inverse Complementary error function.
    erf2inv: Inverse two-argument error function.
    References
    ==========
    .. [1] http://functions.wolfram.com/GammaBetaErf/Erf2/
    """
    def fdiff(self, argindex):
        """Derivative with respect to the lower (1) or upper (2) limit."""
        lower, upper = self.args
        if argindex == 1:
            return -2*C.exp(-lower**2)/sqrt(S.Pi)
        if argindex == 2:
            return 2*C.exp(-upper**2)/sqrt(S.Pi)
        raise ArgumentIndexError(self, argindex)
    @classmethod
    def eval(cls, x, y):
        """Evaluate at special points and normalize argument signs."""
        # At 0 or either infinity erf2 splits into single-argument erfs.
        special = (S.Infinity, S.NegativeInfinity, S.Zero)
        if x is S.NaN or y is S.NaN:
            return S.NaN
        if x == y:
            return S.Zero
        if any(arg is s for arg in (x, y) for s in special):
            return erf(y) - erf(x)
        # erf2(x, erf2inv(x, w)) == w by definition of the inverse.
        if y.func is erf2inv and y.args[0] == x:
            return y.args[1]
        # Pull a -1 factor out of both arguments (odd symmetry), or fall
        # back to the erf difference when only one argument is negated.
        minus_x = x.could_extract_minus_sign()
        minus_y = y.could_extract_minus_sign()
        if minus_x and minus_y:
            return -cls(-x, -y)
        if minus_x or minus_y:
            return erf(y) - erf(x)
    def _eval_conjugate(self):
        lower, upper = self.args
        return self.func(lower.conjugate(), upper.conjugate())
    def _eval_is_real(self):
        lower, upper = self.args
        return lower.is_real and upper.is_real
    def _eval_rewrite_as_erf(self, x, y):
        return erf(y) - erf(x)
    def _eval_rewrite_as_erfc(self, x, y):
        return erfc(x) - erfc(y)
    def _eval_rewrite_as_erfi(self, x, y):
        return I*(erfi(I*x) - erfi(I*y))
    def _eval_rewrite_as_fresnels(self, x, y):
        return erf(y).rewrite(fresnels) - erf(x).rewrite(fresnels)
    def _eval_rewrite_as_fresnelc(self, x, y):
        return erf(y).rewrite(fresnelc) - erf(x).rewrite(fresnelc)
    def _eval_rewrite_as_meijerg(self, x, y):
        return erf(y).rewrite(meijerg) - erf(x).rewrite(meijerg)
    def _eval_rewrite_as_hyper(self, x, y):
        return erf(y).rewrite(hyper) - erf(x).rewrite(hyper)
    def _eval_rewrite_as_uppergamma(self, x, y):
        # Difference of the two single-limit uppergamma representations.
        def one_limit(t):
            return sqrt(t**2)/t*(S.One - C.uppergamma(S.Half, t**2)/sqrt(S.Pi))
        return one_limit(y) - one_limit(x)
    def _eval_rewrite_as_expint(self, x, y):
        return erf(y).rewrite(expint) - erf(x).rewrite(expint)
class erfinv(Function):
    r"""
    Inverse Error Function. The erfinv function is defined as:
    .. math ::
        \mathrm{erf}(x) = y \quad \Rightarrow \quad \mathrm{erfinv}(y) = x
    Examples
    ========
    >>> from sympy import I, oo, erfinv
    >>> from sympy.abc import x
    Several special values are known:
    >>> erfinv(0)
    0
    >>> erfinv(1)
    oo
    Differentiation with respect to x is supported:
    >>> from sympy import diff
    >>> diff(erfinv(x), x)
    sqrt(pi)*exp(erfinv(x)**2)/2
    We can numerically evaluate the inverse error function to arbitrary precision
    on [-1, 1]:
    >>> erfinv(0.2).evalf(30)
    0.179143454621291692285822705344
    See Also
    ========
    erf: Gaussian error function.
    erfc: Complementary error function.
    erfi: Imaginary error function.
    erf2: Two-argument error function.
    erfcinv: Inverse Complementary error function.
    erf2inv: Inverse two-argument error function.
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Error_function#Inverse_functions
    .. [2] http://functions.wolfram.com/GammaBetaErf/InverseErf/
    """
    def fdiff(self, argindex=1):
        if argindex != 1:
            raise ArgumentIndexError(self, argindex)
        # d/dz erfinv(z) = sqrt(pi)/2 * exp(erfinv(z)**2)
        return S.Half*sqrt(S.Pi)*C.exp(self.func(self.args[0])**2)
    def inverse(self, argindex=1):
        """Return the function of which this is the inverse (``erf``)."""
        return erf
    @classmethod
    def eval(cls, z):
        """Evaluate at the boundary/centre of [-1, 1] and unwind erf."""
        if z is S.NaN:
            return S.NaN
        if z is S.NegativeOne:
            return S.NegativeInfinity
        if z is S.Zero:
            return S.Zero
        if z is S.One:
            return S.Infinity
        # erfinv(erf(x)) == x for real x.
        if z.func is erf and z.args[0].is_real:
            return z.args[0]
        # erfinv(-erf(x)) == -x for real x.
        neg = z.extract_multiplicatively(-1)
        if neg is not None and neg.func is erf and neg.args[0].is_real:
            return -neg.args[0]
    def _eval_rewrite_as_erfcinv(self, z):
        return erfcinv(S.One - z)
class erfcinv (Function):
    r"""
    Inverse Complementary Error Function. The erfcinv function is defined as:
    .. math ::
        \mathrm{erfc}(x) = y \quad \Rightarrow \quad \mathrm{erfcinv}(y) = x
    Examples
    ========
    >>> from sympy import I, oo, erfcinv
    >>> from sympy.abc import x
    Several special values are known:
    >>> erfcinv(1)
    0
    >>> erfcinv(0)
    oo
    Differentiation with respect to x is supported:
    >>> from sympy import diff
    >>> diff(erfcinv(x), x)
    -sqrt(pi)*exp(erfcinv(x)**2)/2
    See Also
    ========
    erf: Gaussian error function.
    erfc: Complementary error function.
    erfi: Imaginary error function.
    erf2: Two-argument error function.
    erfinv: Inverse error function.
    erf2inv: Inverse two-argument error function.
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Error_function#Inverse_functions
    .. [2] http://functions.wolfram.com/GammaBetaErf/InverseErfc/
    """
    def fdiff(self, argindex=1):
        if argindex != 1:
            raise ArgumentIndexError(self, argindex)
        # d/dz erfcinv(z) = -sqrt(pi)/2 * exp(erfcinv(z)**2)
        return -S.Half*sqrt(S.Pi)*C.exp(self.func(self.args[0])**2)
    def inverse(self, argindex=1):
        """Return the function of which this is the inverse (``erfc``)."""
        return erfc
    @classmethod
    def eval(cls, z):
        """Evaluate at the special points of [0, 2], the real range of erfc."""
        if z is S.NaN:
            return S.NaN
        if z is S.Zero:
            return S.Infinity
        if z is S.One:
            return S.Zero
        if z == 2:
            return S.NegativeInfinity
    def _eval_rewrite_as_erfinv(self, z):
        return erfinv(S.One - z)
class erf2inv(Function):
    r"""
    Two-argument Inverse error function. The erf2inv function is defined as:
    .. math ::
        \mathrm{erf2}(x, w) = y \quad \Rightarrow \quad \mathrm{erf2inv}(x, y) = w
    Examples
    ========
    >>> from sympy import I, oo, erf2inv, erfinv, erfcinv
    >>> from sympy.abc import x, y
    Several special values are known:
    >>> erf2inv(0, 0)
    0
    >>> erf2inv(1, 0)
    1
    >>> erf2inv(0, 1)
    oo
    >>> erf2inv(0, y)
    erfinv(y)
    >>> erf2inv(oo, y)
    erfcinv(-y)
    Differentiation with respect to x and y is supported:
    >>> from sympy import diff
    >>> diff(erf2inv(x, y), x)
    exp(-x**2 + erf2inv(x, y)**2)
    >>> diff(erf2inv(x, y), y)
    sqrt(pi)*exp(erf2inv(x, y)**2)/2
    See Also
    ========
    erf: Gaussian error function.
    erfc: Complementary error function.
    erfi: Imaginary error function.
    erf2: Two-argument error function.
    erfinv: Inverse error function.
    erfcinv: Inverse complementary error function.
    References
    ==========
    .. [1] http://functions.wolfram.com/GammaBetaErf/InverseErf2/
    """
    def fdiff(self, argindex):
        """Derivative with respect to the first (1) or second (2) argument."""
        x, y = self.args
        w = self.func(x, y)
        if argindex == 1:
            return C.exp(w**2 - x**2)
        if argindex == 2:
            return S.Half*sqrt(S.Pi)*C.exp(w**2)
        raise ArgumentIndexError(self, argindex)
    @classmethod
    def eval(cls, x, y):
        """Evaluate at the known special points of either argument."""
        if x is S.NaN or y is S.NaN:
            return S.NaN
        if x is S.Zero:
            # With a zero first argument this reduces to plain erfinv.
            if y is S.Zero:
                return S.Zero
            if y is S.One:
                return S.Infinity
            return erfinv(y)
        if x is S.One and y is S.Zero:
            return S.One
        if x is S.Infinity:
            return erfcinv(-y)
        if y is S.Zero:
            return x
        if y is S.Infinity:
            return erfinv(x)
###############################################################################
#################### EXPONENTIAL INTEGRALS ####################################
###############################################################################
class Ei(Function):
    r"""
    The classical exponential integral.
    For use in SymPy, this function is defined as
    .. math:: \operatorname{Ei}(x) = \sum_{n=1}^\infty \frac{x^n}{n\, n!}
                                     + \log(x) + \gamma,
    where `\gamma` is the Euler-Mascheroni constant.
    If `x` is a polar number, this defines an analytic function on the
    Riemann surface of the logarithm. Otherwise this defines an analytic
    function in the cut plane `\mathbb{C} \setminus (-\infty, 0]`.
    **Background**
    The name *exponential integral* comes from the following statement:
    .. math:: \operatorname{Ei}(x) = \int_{-\infty}^x \frac{e^t}{t} \mathrm{d}t
    If the integral is interpreted as a Cauchy principal value, this statement
    holds for `x > 0` and `\operatorname{Ei}(x)` as defined above.
    Note that we carefully avoided defining `\operatorname{Ei}(x)` for
    negative real `x`. This is because above integral formula does not hold for
    any polar lift of such `x`, indeed all branches of
    `\operatorname{Ei}(x)` above the negative reals are imaginary.
    However, the following statement holds for all `x \in \mathbb{R}^*`:
    .. math:: \int_{-\infty}^x \frac{e^t}{t} \mathrm{d}t =
              \frac{\operatorname{Ei}\left(|x|e^{i \arg(x)}\right) +
                    \operatorname{Ei}\left(|x|e^{- i \arg(x)}\right)}{2},
    where the integral is again understood to be a principal value if
    `x > 0`, and `|x|e^{i \arg(x)}`,
    `|x|e^{- i \arg(x)}` denote two conjugate polar lifts of `x`.
    Examples
    ========
    >>> from sympy import Ei, polar_lift, exp_polar, I, pi
    >>> from sympy.abc import x
    The exponential integral in SymPy is strictly undefined for negative values
    of the argument. For convenience, exponential integrals with negative
    arguments are immediately converted into an expression that agrees with
    the classical integral definition:
    >>> Ei(-1)
    -I*pi + Ei(exp_polar(I*pi))
    This yields a real value:
    >>> Ei(-1).n(chop=True)
    -0.219383934395520
    On the other hand the analytic continuation is not real:
    >>> Ei(polar_lift(-1)).n(chop=True)
    -0.21938393439552 + 3.14159265358979*I
    The exponential integral has a logarithmic branch point at the origin:
    >>> Ei(x*exp_polar(2*I*pi))
    Ei(x) + 2*I*pi
    Differentiation is supported:
    >>> Ei(x).diff(x)
    exp(x)/x
    The exponential integral is related to many other special functions.
    For example:
    >>> from sympy import uppergamma, expint, Shi
    >>> Ei(x).rewrite(expint)
    -expint(1, x*exp_polar(I*pi)) - I*pi
    >>> Ei(x).rewrite(Shi)
    Chi(x) + Shi(x)
    See Also
    ========
    expint: Generalised exponential integral.
    E1: Special case of the generalised exponential integral.
    li: Logarithmic integral.
    Li: Offset logarithmic integral.
    Si: Sine integral.
    Ci: Cosine integral.
    Shi: Hyperbolic sine integral.
    Chi: Hyperbolic cosine integral.
    sympy.functions.special.gamma_functions.uppergamma: Upper incomplete gamma function.
    References
    ==========
    .. [1] http://dlmf.nist.gov/6.6
    .. [2] http://en.wikipedia.org/wiki/Exponential_integral
    .. [3] Abramowitz & Stegun, section 5: http://people.math.sfu.ca/~cbm/aands/page_228.htm
    """
    @classmethod
    def eval(cls, z):
        """Normalize negative arguments and unwind polar branch factors."""
        if not z.is_polar and z.is_negative:
            # Note: is this a good idea?
            return Ei(polar_lift(z)) - pi*I
        nz, n = z.extract_branch_factor()
        if n:
            # Each full turn around the origin adds 2*pi*I.
            return Ei(nz) + 2*I*pi*n
    def fdiff(self, argindex=1):
        from sympy import unpolarify
        arg = unpolarify(self.args[0])
        if argindex == 1:
            # d/dz Ei(z) = exp(z)/z
            return C.exp(arg)/arg
        else:
            raise ArgumentIndexError(self, argindex)
    def _eval_evalf(self, prec):
        # On the negative real axis the I*pi correction from eval() must be
        # re-added for numerical evaluation.
        if (self.args[0]/polar_lift(-1)).is_positive:
            return Function._eval_evalf(self, prec) + (I*pi)._eval_evalf(prec)
        return Function._eval_evalf(self, prec)
    def _eval_rewrite_as_uppergamma(self, z):
        from sympy import uppergamma
        # XXX this does not currently work usefully because uppergamma
        # immediately turns into expint
        return -uppergamma(0, polar_lift(-1)*z) - I*pi
    def _eval_rewrite_as_expint(self, z):
        return -expint(1, polar_lift(-1)*z) - I*pi
    def _eval_rewrite_as_li(self, z):
        if isinstance(z, log):
            return li(z.args[0])
        # TODO:
        # Actually it only holds that:
        # Ei(z) = li(exp(z))
        # for -pi < imag(z) <= pi
        return li(exp(z))
    def _eval_rewrite_as_Si(self, z):
        # Ei(z) == Shi(z) + Chi(z)
        return Shi(z) + Chi(z)
    _eval_rewrite_as_Ci = _eval_rewrite_as_Si
    _eval_rewrite_as_Chi = _eval_rewrite_as_Si
    _eval_rewrite_as_Shi = _eval_rewrite_as_Si
    def _eval_rewrite_as_tractable(self, z):
        return C.exp(z) * _eis(z)
    def _eval_nseries(self, x, n, logx):
        x0 = self.args[0].limit(x, 0)
        if x0 is S.Zero:
            # Expand through the hyperbolic-integral representation at 0.
            f = self._eval_rewrite_as_Si(*self.args)
            return f._eval_nseries(x, n, logx)
        return super(Ei, self)._eval_nseries(x, n, logx)
class expint(Function):
    r"""
    Generalized exponential integral.
    This function is defined as
    .. math:: \operatorname{E}_\nu(z) = z^{\nu - 1} \Gamma(1 - \nu, z),
    where `\Gamma(1 - \nu, z)` is the upper incomplete gamma function
    (``uppergamma``).
    Hence for :math:`z` with positive real part we have
    .. math:: \operatorname{E}_\nu(z)
          =   \int_1^\infty \frac{e^{-zt}}{z^\nu} \mathrm{d}t,
    which explains the name.
    The representation as an incomplete gamma function provides an analytic
    continuation for :math:`\operatorname{E}_\nu(z)`. If :math:`\nu` is a
    non-positive integer the exponential integral is thus an unbranched
    function of :math:`z`, otherwise there is a branch point at the origin.
    Refer to the incomplete gamma function documentation for details of the
    branching behavior.
    Examples
    ========
    >>> from sympy import expint, S
    >>> from sympy.abc import nu, z
    Differentiation is supported. Differentiation with respect to z explains
    further the name: for integral orders, the exponential integral is an
    iterated integral of the exponential function.
    >>> expint(nu, z).diff(z)
    -expint(nu - 1, z)
    Differentiation with respect to nu has no classical expression:
    >>> expint(nu, z).diff(nu)
    -z**(nu - 1)*meijerg(((), (1, 1)), ((0, 0, -nu + 1), ()), z)
    At non-positive integer orders, the exponential integral reduces to the
    exponential function:
    >>> expint(0, z)
    exp(-z)/z
    >>> expint(-1, z)
    exp(-z)/z + exp(-z)/z**2
    At half-integers it reduces to error functions:
    >>> expint(S(1)/2, z)
    -sqrt(pi)*erf(sqrt(z))/sqrt(z) + sqrt(pi)/sqrt(z)
    At positive integer orders it can be rewritten in terms of exponentials
    and expint(1, z). Use expand_func() to do this:
    >>> from sympy import expand_func
    >>> expand_func(expint(5, z))
    z**4*expint(1, z)/24 + (-z**3 + z**2 - 2*z + 6)*exp(-z)/24
    The generalised exponential integral is essentially equivalent to the
    incomplete gamma function:
    >>> from sympy import uppergamma
    >>> expint(nu, z).rewrite(uppergamma)
    z**(nu - 1)*uppergamma(-nu + 1, z)
    As such it is branched at the origin:
    >>> from sympy import exp_polar, pi, I
    >>> expint(4, z*exp_polar(2*pi*I))
    I*pi*z**3/3 + expint(4, z)
    >>> expint(nu, z*exp_polar(2*pi*I))
    z**(nu - 1)*(exp(2*I*pi*nu) - 1)*gamma(-nu + 1) + expint(nu, z)
    See Also
    ========
    Ei: Another related function called exponential integral.
    E1: The classical case, returns expint(1, z).
    li: Logarithmic integral.
    Li: Offset logarithmic integral.
    Si: Sine integral.
    Ci: Cosine integral.
    Shi: Hyperbolic sine integral.
    Chi: Hyperbolic cosine integral.
    sympy.functions.special.gamma_functions.uppergamma
    References
    ==========
    .. [1] http://dlmf.nist.gov/8.19
    .. [2] http://functions.wolfram.com/GammaBetaErf/ExpIntegralE/
    .. [3] http://en.wikipedia.org/wiki/Exponential_integral
    """
    @classmethod
    def eval(cls, nu, z):
        """Unpolarify the order, collapse special orders, unwind branches."""
        from sympy import (unpolarify, expand_mul, uppergamma, exp, gamma,
                           factorial)
        nu2 = unpolarify(nu)
        if nu != nu2:
            return expint(nu2, z)
        # Non-positive integer or half-integer order: the uppergamma
        # representation collapses to elementary functions / erf.
        if nu.is_Integer and nu <= 0 or (not nu.is_Integer and (2*nu).is_Integer):
            return unpolarify(expand_mul(z**(nu - 1)*uppergamma(1 - nu, z)))
        # Extract branching information. This can be deduced from what is
        # explained in lowergamma.eval().
        z, n = z.extract_branch_factor()
        if n == 0:
            return
        if nu.is_integer:
            if (nu > 0) != True:
                return
            return expint(nu, z) \
                - 2*pi*I*n*(-1)**(nu - 1)/factorial(nu - 1)*unpolarify(z)**(nu - 1)
        else:
            return (exp(2*I*pi*nu*n) - 1)*z**(nu - 1)*gamma(1 - nu) + expint(nu, z)
    def fdiff(self, argindex):
        from sympy import meijerg
        nu, z = self.args
        if argindex == 1:
            # No classical closed form for d/dnu; use a Meijer G-function.
            return -z**(nu - 1)*meijerg([], [1, 1], [0, 0, 1 - nu], [], z)
        elif argindex == 2:
            # d/dz E_nu(z) = -E_{nu-1}(z)
            return -expint(nu - 1, z)
        else:
            raise ArgumentIndexError(self, argindex)
    def _eval_rewrite_as_uppergamma(self, nu, z):
        from sympy import uppergamma
        return z**(nu - 1)*uppergamma(1 - nu, z)
    def _eval_rewrite_as_Ei(self, nu, z):
        from sympy import exp_polar, unpolarify, exp, factorial
        if nu == 1:
            return -Ei(z*exp_polar(-I*pi)) - I*pi
        elif nu.is_Integer and nu > 1:
            # DLMF, 8.19.7
            x = -unpolarify(z)
            return x**(nu - 1)/factorial(nu - 1)*E1(z).rewrite(Ei) + \
                exp(x)/factorial(nu - 1) * \
                Add(*[factorial(nu - k - 2)*x**k for k in range(nu - 1)])
        else:
            return self
    def _eval_expand_func(self, **hints):
        return self.rewrite(Ei).rewrite(expint, **hints)
    def _eval_rewrite_as_Si(self, nu, z):
        # Only the classical case nu == 1 has a trigonometric-integral form.
        if nu != 1:
            return self
        return Shi(z) - Chi(z)
    _eval_rewrite_as_Ci = _eval_rewrite_as_Si
    _eval_rewrite_as_Chi = _eval_rewrite_as_Si
    _eval_rewrite_as_Shi = _eval_rewrite_as_Si
    def _eval_nseries(self, x, n, logx):
        if not self.args[0].has(x):
            nu = self.args[0]
            if nu == 1:
                f = self._eval_rewrite_as_Si(*self.args)
                return f._eval_nseries(x, n, logx)
            elif nu.is_Integer and nu > 1:
                f = self._eval_rewrite_as_Ei(*self.args)
                return f._eval_nseries(x, n, logx)
        return super(expint, self)._eval_nseries(x, n, logx)
def E1(z):
    """
    Classical case of the generalized exponential integral.
    This is equivalent to ``expint(1, z)``.
    See Also
    ========
    Ei: Exponential integral.
    expint: Generalised exponential integral.
    li: Logarithmic integral.
    Li: Offset logarithmic integral.
    Si: Sine integral.
    Ci: Cosine integral.
    Shi: Hyperbolic sine integral.
    Chi: Hyperbolic cosine integral.
    """
    # Thin convenience wrapper around the general integral at order one.
    return expint(S.One, z)
class li(Function):
    r"""
    The classical logarithmic integral.
    For the use in SymPy, this function is defined as
    .. math:: \operatorname{li}(x) = \int_0^x \frac{1}{\log(t)} \mathrm{d}t \,.
    Examples
    ========
    >>> from sympy import I, oo, li
    >>> from sympy.abc import z
    Several special values are known:
    >>> li(0)
    0
    >>> li(1)
    -oo
    >>> li(oo)
    oo
    Differentiation with respect to z is supported:
    >>> from sympy import diff
    >>> diff(li(z), z)
    1/log(z)
    Defining the `li` function via an integral:
    The logarithmic integral can also be defined in terms of Ei:
    >>> from sympy import Ei
    >>> li(z).rewrite(Ei)
    Ei(log(z))
    >>> diff(li(z).rewrite(Ei), z)
    1/log(z)
    We can numerically evaluate the logarithmic integral to arbitrary precision
    on the whole complex plane (except the singular points):
    >>> li(2).evalf(30)
    1.04516378011749278484458888919
    >>> li(2*I).evalf(30)
    1.0652795784357498247001125598 + 3.08346052231061726610939702133*I
    We can even compute Soldner's constant by the help of mpmath:
    >>> from sympy.mpmath import findroot
    >>> findroot(li, 2)
    1.45136923488338
    Further transformations include rewriting `li` in terms of
    the trigonometric integrals `Si`, `Ci`, `Shi` and `Chi`:
    >>> from sympy import Si, Ci, Shi, Chi
    >>> li(z).rewrite(Si)
    -log(I*log(z)) - log(1/log(z))/2 + log(log(z))/2 + Ci(I*log(z)) + Shi(log(z))
    >>> li(z).rewrite(Ci)
    -log(I*log(z)) - log(1/log(z))/2 + log(log(z))/2 + Ci(I*log(z)) + Shi(log(z))
    >>> li(z).rewrite(Shi)
    -log(1/log(z))/2 + log(log(z))/2 + Chi(log(z)) - Shi(log(z))
    >>> li(z).rewrite(Chi)
    -log(1/log(z))/2 + log(log(z))/2 + Chi(log(z)) - Shi(log(z))
    See Also
    ========
    Li: Offset logarithmic integral.
    Ei: Exponential integral.
    expint: Generalised exponential integral.
    E1: Special case of the generalised exponential integral.
    Si: Sine integral.
    Ci: Cosine integral.
    Shi: Hyperbolic sine integral.
    Chi: Hyperbolic cosine integral.
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Logarithmic_integral
    .. [2] http://mathworld.wolfram.com/LogarithmicIntegral.html
    .. [3] http://dlmf.nist.gov/6
    .. [4] http://mathworld.wolfram.com/SoldnersConstant.html
    """
    @classmethod
    def eval(cls, z):
        """Evaluate at the known special points 0, 1 and oo."""
        if z is S.Zero:
            return S.Zero
        elif z is S.One:
            return S.NegativeInfinity
        elif z is S.Infinity:
            return S.Infinity
    def fdiff(self, argindex=1):
        arg = self.args[0]
        if argindex == 1:
            # d/dz li(z) = 1/log(z)
            return S.One / C.log(arg)
        else:
            raise ArgumentIndexError(self, argindex)
    def _eval_conjugate(self):
        z = self.args[0]
        # Exclude values on the branch cut (-oo, 0)
        if not (z.is_real and z.is_negative):
            return self.func(z.conjugate())
    def _eval_rewrite_as_Li(self, z):
        return Li(z) + li(2)
    def _eval_rewrite_as_Ei(self, z):
        return Ei(C.log(z))
    def _eval_rewrite_as_uppergamma(self, z):
        from sympy import uppergamma
        # The log-difference terms fix the branch of the representation.
        return (-uppergamma(0, -C.log(z)) +
                S.Half*(C.log(C.log(z)) - C.log(S.One/C.log(z))) - C.log(-C.log(z)))
    def _eval_rewrite_as_Si(self, z):
        return (Ci(I*C.log(z)) - I*Si(I*C.log(z)) -
                S.Half*(C.log(S.One/C.log(z)) - C.log(C.log(z))) - C.log(I*C.log(z)))
    _eval_rewrite_as_Ci = _eval_rewrite_as_Si
    def _eval_rewrite_as_Shi(self, z):
        return (Chi(C.log(z)) - Shi(C.log(z)) - S.Half*(C.log(S.One/C.log(z)) - C.log(C.log(z))))
    _eval_rewrite_as_Chi = _eval_rewrite_as_Shi
    def _eval_rewrite_as_hyper(self, z):
        return (C.log(z)*hyper((1, 1), (2, 2), C.log(z)) +
                S.Half*(C.log(C.log(z)) - C.log(S.One/C.log(z))) + S.EulerGamma)
    def _eval_rewrite_as_meijerg(self, z):
        return (-C.log(-C.log(z)) - S.Half*(C.log(S.One/C.log(z)) - C.log(C.log(z)))
                - meijerg(((), (1,)), ((0, 0), ()), -C.log(z)))
    def _eval_rewrite_as_tractable(self, z):
        return z * _eis(C.log(z))
class Li(Function):
    r"""
    The offset logarithmic integral.
    For the use in SymPy, this function is defined as
    .. math:: \operatorname{Li}(x) = \operatorname{li}(x) - \operatorname{li}(2)
    Examples
    ========
    >>> from sympy import I, oo, Li
    >>> from sympy.abc import z
    The following special value is known:
    >>> Li(2)
    0
    Differentiation with respect to z is supported:
    >>> from sympy import diff
    >>> diff(Li(z), z)
    1/log(z)
    The shifted logarithmic integral can be written in terms of `li(z)`:
    >>> from sympy import li
    >>> Li(z).rewrite(li)
    li(z) - li(2)
    We can numerically evaluate the logarithmic integral to arbitrary precision
    on the whole complex plane (except the singular points):
    >>> Li(2).evalf(30)
    0
    >>> Li(4).evalf(30)
    1.92242131492155809316615998938
    See Also
    ========
    li: Logarithmic integral.
    Ei: Exponential integral.
    expint: Generalised exponential integral.
    E1: Special case of the generalised exponential integral.
    Si: Sine integral.
    Ci: Cosine integral.
    Shi: Hyperbolic sine integral.
    Chi: Hyperbolic cosine integral.
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Logarithmic_integral
    .. [2] http://mathworld.wolfram.com/LogarithmicIntegral.html
    .. [3] http://dlmf.nist.gov/6
    """
    @classmethod
    def eval(cls, z):
        """Evaluate at the known special points oo and 2."""
        if z is S.Infinity:
            return S.Infinity
        # Li(2) == li(2) - li(2) == 0.  Compare by value: the previous
        # ``z is 2*S.One`` identity test only worked because SymPy caches
        # small Integers, which is an implementation detail.
        elif z == S(2):
            return S.Zero
    def fdiff(self, argindex=1):
        arg = self.args[0]
        if argindex == 1:
            # d/dz Li(z) = 1/log(z), the constant offset li(2) drops out.
            return S.One / C.log(arg)
        else:
            raise ArgumentIndexError(self, argindex)
    def _eval_evalf(self, prec):
        # Evaluate numerically through li, which has mpmath support.
        return self.rewrite(li).evalf(prec)
    def _eval_rewrite_as_li(self, z):
        return li(z) - li(2)
    def _eval_rewrite_as_tractable(self, z):
        return self.rewrite(li).rewrite("tractable", deep=True)
###############################################################################
#################### TRIGONOMETRIC INTEGRALS ##################################
###############################################################################
class TrigonometricIntegral(Function):
    """Base class for the trigonometric integrals.

    Concrete subclasses supply the integrand through ``_trigfunc`` and the
    special values ``_atzero``, ``_atinf`` and ``_atneginf``, plus the two
    extraction hooks ``_minusfactor`` (a -1 factor was pulled out of the
    argument) and ``_Ifactor`` (an I factor was pulled out).
    """
    @classmethod
    def eval(cls, z):
        """Evaluate special points and normalize -1/I/branch factors."""
        # Special values at the origin and at the two real infinities.
        if z == 0:
            return cls._atzero
        elif z is S.Infinity:
            return cls._atinf
        elif z is S.NegativeInfinity:
            return cls._atneginf
        # Try to pull out a factor of I.  For the odd integrals
        # (_trigfunc(0) == 0) a non-polar I may be extracted as well.
        nz = z.extract_multiplicatively(polar_lift(I))
        if nz is None and cls._trigfunc(0) == 0:
            nz = z.extract_multiplicatively(I)
        if nz is not None:
            return cls._Ifactor(nz, 1)
        nz = z.extract_multiplicatively(polar_lift(-I))
        if nz is not None:
            return cls._Ifactor(nz, -1)
        # Likewise try to pull out a factor of -1.
        nz = z.extract_multiplicatively(polar_lift(-1))
        if nz is None and cls._trigfunc(0) == 0:
            nz = z.extract_multiplicatively(-1)
        if nz is not None:
            return cls._minusfactor(nz)
        # Unwind any polar branch factor: each full turn around the origin
        # contributes 2*pi*I*_trigfunc(0).
        nz, n = z.extract_branch_factor()
        if n == 0 and nz == z:
            return
        return 2*pi*I*n*cls._trigfunc(0) + cls(nz)

    def fdiff(self, argindex=1):
        from sympy import unpolarify
        arg = unpolarify(self.args[0])
        if argindex == 1:
            # The integrals all have integrand _trigfunc(t)/t.
            return self._trigfunc(arg)/arg
        else:
            # BUG FIX: previously this fell through and silently returned
            # None for an invalid argindex; raise ArgumentIndexError, as
            # every other fdiff in this module does.
            raise ArgumentIndexError(self, argindex)

    def _eval_rewrite_as_Ei(self, z):
        return self._eval_rewrite_as_expint(z).rewrite(Ei)

    def _eval_rewrite_as_uppergamma(self, z):
        from sympy import uppergamma
        return self._eval_rewrite_as_expint(z).rewrite(uppergamma)

    def _eval_nseries(self, x, n, logx):
        # NOTE this is fairly inefficient
        from sympy import log, EulerGamma, Pow
        n += 1
        if self.args[0].subs(x, 0) != 0:
            return super(TrigonometricIntegral, self)._eval_nseries(x, n, logx)
        # Integrate the integrand's series term by term: each power t**m
        # picks up a factor 1/m; the even integrals additionally gain the
        # EulerGamma + log(x) term from the definition.
        baseseries = self._trigfunc(x)._eval_nseries(x, n, logx)
        if self._trigfunc(0) != 0:
            baseseries -= 1
        baseseries = baseseries.replace(Pow, lambda t, n: t**n/n, simultaneous=False)
        if self._trigfunc(0) != 0:
            baseseries += EulerGamma + log(x)
        return baseseries.subs(x, self.args[0])._eval_nseries(x, n, logx)
class Si(TrigonometricIntegral):
    r"""
    Sine integral.
    This function is defined by
    .. math:: \operatorname{Si}(z) = \int_0^z \frac{\sin{t}}{t} \mathrm{d}t.
    It is an entire function.
    Examples
    ========
    >>> from sympy import Si
    >>> from sympy.abc import z
    The sine integral is an antiderivative of sin(z)/z:
    >>> Si(z).diff(z)
    sin(z)/z
    It is unbranched:
    >>> from sympy import exp_polar, I, pi
    >>> Si(z*exp_polar(2*I*pi))
    Si(z)
    Sine integral behaves much like ordinary sine under multiplication by ``I``:
    >>> Si(I*z)
    I*Shi(z)
    >>> Si(-z)
    -Si(z)
    It can also be expressed in terms of exponential integrals, but beware
    that the latter is branched:
    >>> from sympy import expint
    >>> Si(z).rewrite(expint)
    -I*(-expint(1, z*exp_polar(-I*pi/2))/2 +
         expint(1, z*exp_polar(I*pi/2))/2) + pi/2
    See Also
    ========
    Ci: Cosine integral.
    Shi: Hyperbolic sine integral.
    Chi: Hyperbolic cosine integral.
    Ei: Exponential integral.
    expint: Generalised exponential integral.
    E1: Special case of the generalised exponential integral.
    li: Logarithmic integral.
    Li: Offset logarithmic integral.
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Trigonometric_integral
    """
    # Hooks consumed by the TrigonometricIntegral machinery.
    _trigfunc = C.sin
    _atzero = S.Zero
    _atinf = S.Half*pi
    _atneginf = -S.Half*pi
    @classmethod
    def _minusfactor(cls, z):
        # Si is odd: Si(-z) == -Si(z).
        return -Si(z)
    @classmethod
    def _Ifactor(cls, z, sign):
        # Si(I*z) == I*Shi(z); sign carries the orientation of the factor.
        return sign*I*Shi(z)
    def _eval_rewrite_as_expint(self, z):
        # XXX should we polarify z?
        upper = E1(polar_lift(I)*z)
        lower = E1(polar_lift(-I)*z)
        return pi/2 + (upper - lower)/(2*I)
class Ci(TrigonometricIntegral):
    r"""
    Cosine integral.
    This function is defined for positive `x` by
    .. math:: \operatorname{Ci}(x) = \gamma + \log{x}
                         + \int_0^x \frac{\cos{t} - 1}{t} \mathrm{d}t
           = -\int_x^\infty \frac{\cos{t}}{t} \mathrm{d}t,
    where `\gamma` is the Euler-Mascheroni constant.
    We have
    .. math:: \operatorname{Ci}(z) =
        -\frac{\operatorname{E}_1\left(e^{i\pi/2} z\right)
               + \operatorname{E}_1\left(e^{-i \pi/2} z\right)}{2}
    which holds for all polar `z` and thus provides an analytic
    continuation to the Riemann surface of the logarithm.
    The formula also holds as stated
    for `z \in \mathbb{C}` with `\Re(z) > 0`.
    By lifting to the principal branch we obtain an analytic function on the
    cut complex plane.
    Examples
    ========
    >>> from sympy import Ci
    >>> from sympy.abc import z
    The cosine integral is a primitive of `\cos(z)/z`:
    >>> Ci(z).diff(z)
    cos(z)/z
    It has a logarithmic branch point at the origin:
    >>> from sympy import exp_polar, I, pi
    >>> Ci(z*exp_polar(2*I*pi))
    Ci(z) + 2*I*pi
    The cosine integral behaves somewhat like ordinary `\cos` under multiplication by `i`:
    >>> from sympy import polar_lift
    >>> Ci(polar_lift(I)*z)
    Chi(z) + I*pi/2
    >>> Ci(polar_lift(-1)*z)
    Ci(z) + I*pi
    It can also be expressed in terms of exponential integrals:
    >>> from sympy import expint
    >>> Ci(z).rewrite(expint)
    -expint(1, z*exp_polar(-I*pi/2))/2 - expint(1, z*exp_polar(I*pi/2))/2
    See Also
    ========
    Si: Sine integral.
    Shi: Hyperbolic sine integral.
    Chi: Hyperbolic cosine integral.
    Ei: Exponential integral.
    expint: Generalised exponential integral.
    E1: Special case of the generalised exponential integral.
    li: Logarithmic integral.
    Li: Offset logarithmic integral.
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Trigonometric_integral
    """
    # Hooks consumed by the TrigonometricIntegral machinery.
    _trigfunc = C.cos
    _atzero = S.ComplexInfinity
    _atinf = S.Zero
    _atneginf = I*pi
    @classmethod
    def _minusfactor(cls, z):
        # Negating the argument shifts by the branch constant I*pi.
        return Ci(z) + I*pi
    @classmethod
    def _Ifactor(cls, z, sign):
        # An I factor turns Ci into Chi plus a half-branch shift.
        return Chi(z) + I*pi/2*sign
    def _eval_rewrite_as_expint(self, z):
        return -(E1(polar_lift(I)*z) + E1(polar_lift(-I)*z))/2
class Shi(TrigonometricIntegral):
    r"""
    Sinh integral.
    This function is defined by
    .. math:: \operatorname{Shi}(z) = \int_0^z \frac{\sinh{t}}{t} \mathrm{d}t.
    It is an entire function.
    Examples
    ========
    >>> from sympy import Shi
    >>> from sympy.abc import z
    The Sinh integral is a primitive of `\sinh(z)/z`:
    >>> Shi(z).diff(z)
    sinh(z)/z
    It is unbranched:
    >>> from sympy import exp_polar, I, pi
    >>> Shi(z*exp_polar(2*I*pi))
    Shi(z)
    The `\sinh` integral behaves much like ordinary `\sinh` under multiplication by `i`:
    >>> Shi(I*z)
    I*Si(z)
    >>> Shi(-z)
    -Shi(z)
    It can also be expressed in terms of exponential integrals, but beware
    that the latter is branched:
    >>> from sympy import expint
    >>> Shi(z).rewrite(expint)
    expint(1, z)/2 - expint(1, z*exp_polar(I*pi))/2 - I*pi/2
    See Also
    ========
    Si: Sine integral.
    Ci: Cosine integral.
    Chi: Hyperbolic cosine integral.
    Ei: Exponential integral.
    expint: Generalised exponential integral.
    E1: Special case of the generalised exponential integral.
    li: Logarithmic integral.
    Li: Offset logarithmic integral.
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Trigonometric_integral
    """
    # Hooks consumed by the TrigonometricIntegral machinery.
    _trigfunc = C.sinh
    _atzero = S.Zero
    _atinf = S.Infinity
    _atneginf = S.NegativeInfinity
    @classmethod
    def _minusfactor(cls, z):
        # Shi is odd: Shi(-z) == -Shi(z).
        return -Shi(z)
    @classmethod
    def _Ifactor(cls, z, sign):
        # Shi(I*z) == I*Si(z); sign carries the orientation of the factor.
        return sign*I*Si(z)
    def _eval_rewrite_as_expint(self, z):
        from sympy import exp_polar
        # XXX should we polarify z?
        rotated = E1(exp_polar(I*pi)*z)
        return (E1(z) - rotated)/2 - I*pi/2
class Chi(TrigonometricIntegral):
    r"""
    Cosh integral.

    This function is defined for positive :math:`x` by

    .. math:: \operatorname{Chi}(x) = \gamma + \log{x}
                  + \int_0^x \frac{\cosh{t} - 1}{t} \mathrm{d}t,

    where :math:`\gamma` is the Euler-Mascheroni constant.

    We have

    .. math:: \operatorname{Chi}(z) = \operatorname{Ci}\left(e^{i \pi/2}z\right)
                  - i\frac{\pi}{2},

    which holds for all polar :math:`z` and thus provides an analytic
    continuation to the Riemann surface of the logarithm.
    By lifting to the principal branch we obtain an analytic function on the
    cut complex plane.

    Examples
    ========

    >>> from sympy import Chi
    >>> from sympy.abc import z

    The `\cosh` integral is a primitive of `\cosh(z)/z`:

    >>> Chi(z).diff(z)
    cosh(z)/z

    It has a logarithmic branch point at the origin:

    >>> from sympy import exp_polar, I, pi
    >>> Chi(z*exp_polar(2*I*pi))
    Chi(z) + 2*I*pi

    The `\cosh` integral behaves somewhat like ordinary `\cosh` under
    multiplication by `i`:

    >>> from sympy import polar_lift
    >>> Chi(polar_lift(I)*z)
    Ci(z) + I*pi/2
    >>> Chi(polar_lift(-1)*z)
    Chi(z) + I*pi

    It can also be expressed in terms of exponential integrals:

    >>> from sympy import expint
    >>> Chi(z).rewrite(expint)
    -expint(1, z)/2 - expint(1, z*exp_polar(I*pi))/2 - I*pi/2

    See Also
    ========

    Si: Sine integral.
    Ci: Cosine integral.
    Shi: Hyperbolic sine integral.
    Ei: Exponential integral.
    expint: Generalised exponential integral.
    E1: Special case of the generalised exponential integral.
    li: Logarithmic integral.
    Li: Offset logarithmic integral.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Trigonometric_integral
    """

    # Configuration for the TrigonometricIntegral machinery.
    _trigfunc = C.cosh
    _atzero = S.ComplexInfinity  # logarithmic singularity at the origin
    _atinf = S.Infinity          # limit at +oo
    _atneginf = S.Infinity       # value used at -oo on this branch

    @classmethod
    def _minusfactor(cls, z):
        # Reflection rule: Chi(-z) = Chi(z) + I*pi.
        return Chi(z) + I*pi

    @classmethod
    def _Ifactor(cls, z, sign):
        # Rotation rule: Chi(sign*I*z) expressed via the cosine integral Ci.
        return Ci(z) + I*pi/2*sign

    def _eval_rewrite_as_expint(self, z):
        from sympy import exp_polar
        return -I*pi/2 - (E1(z) + E1(exp_polar(I*pi)*z))/2

    def _latex(self, printer, exp=None):
        # Custom LaTeX printing: \operatorname{Chi} instead of the default
        # function-name rendering.
        if len(self.args) != 1:
            raise ValueError("Arg length should be 1")
        if exp:
            return r'\operatorname{Chi}^{%s}{\left (%s \right )}' \
                % (printer._print(exp), printer._print(self.args[0]))
        else:
            return r'\operatorname{Chi}{\left (%s \right )}' \
                % printer._print(self.args[0])

    @staticmethod
    def _latex_no_arg(printer):
        # Used when printing the bare function head (no argument).
        return r'\operatorname{Chi}'
###############################################################################
#################### FRESNEL INTEGRALS ########################################
###############################################################################
class FresnelIntegral(Function):
    """ Base class for the Fresnel integrals.

    Subclasses provide ``_trigfunc`` (sin or cos) and ``_sign`` (the factor
    picked up when pulling ``I`` out of the argument).
    """

    # Fresnel integrals are entire, hence unbranched.
    unbranched = True

    @classmethod
    def eval(cls, z):
        # Value at zero
        if z is S.Zero:
            return S(0)

        # Try to pull out factors of -1 and I; both integrals are odd and
        # pick up cls._sign*I under z -> I*z.
        prefact = S.One
        newarg = z
        changed = False

        nz = newarg.extract_multiplicatively(-1)
        if nz is not None:
            prefact = -prefact
            newarg = nz
            changed = True

        nz = newarg.extract_multiplicatively(I)
        if nz is not None:
            prefact = cls._sign*I*prefact
            newarg = nz
            changed = True

        if changed:
            return prefact*cls(newarg)

        # Values at positive infinity (signs, if any, were extracted above).
        if z is S.Infinity:
            return S.Half
        elif z is I*S.Infinity:
            return cls._sign*I*S.Half

    def fdiff(self, argindex=1):
        # d/dz F(z) = trigfunc(pi*z**2/2) by the fundamental theorem.
        if argindex == 1:
            return self._trigfunc(S.Half*pi*self.args[0]**2)
        else:
            raise ArgumentIndexError(self, argindex)

    def _eval_is_real(self):
        return self.args[0].is_real

    def _eval_conjugate(self):
        # Mirror symmetry: conj(F(z)) = F(conj(z)).
        return self.func(self.args[0].conjugate())

    def _as_real_imag(self, deep=True, **hints):
        # Helper: real/imaginary parts of the *argument* (the full formulas
        # live in as_real_imag below).
        if self.args[0].is_real:
            if deep:
                hints['complex'] = False
                return (self.expand(deep, **hints), S.Zero)
            else:
                return (self, S.Zero)
        if deep:
            re, im = self.args[0].expand(deep, **hints).as_real_imag()
        else:
            re, im = self.args[0].as_real_imag()
        return (re, im)

    def as_real_imag(self, deep=True, **hints):
        # Fresnel S
        # http://functions.wolfram.com/06.32.19.0003.01
        # http://functions.wolfram.com/06.32.19.0006.01
        # Fresnel C
        # http://functions.wolfram.com/06.33.19.0003.01
        # http://functions.wolfram.com/06.33.19.0006.01
        x, y = self._as_real_imag(deep=deep, **hints)
        sq = -y**2/x**2
        re = S.Half*(self.func(x + x*sqrt(sq)) + self.func(x - x*sqrt(sq)))
        im = x/(2*y) * sqrt(sq) * (self.func(x - x*sqrt(sq)) -
                                   self.func(x + x*sqrt(sq)))
        return (re, im)
class fresnels(FresnelIntegral):
    r"""
    Fresnel integral S.

    This function is defined by

    .. math:: \operatorname{S}(z) = \int_0^z \sin{\frac{\pi}{2} t^2} \mathrm{d}t.

    It is an entire function.

    Examples
    ========

    >>> from sympy import I, oo, fresnels
    >>> from sympy.abc import z

    Several special values are known:

    >>> fresnels(0)
    0
    >>> fresnels(oo)
    1/2
    >>> fresnels(-oo)
    -1/2
    >>> fresnels(I*oo)
    -I/2
    >>> fresnels(-I*oo)
    I/2

    In general one can pull out factors of -1 and `i` from the argument:

    >>> fresnels(-z)
    -fresnels(z)
    >>> fresnels(I*z)
    -I*fresnels(z)

    The Fresnel S integral obeys the mirror symmetry
    `\overline{S(z)} = S(\bar{z})`:

    >>> from sympy import conjugate
    >>> conjugate(fresnels(z))
    fresnels(conjugate(z))

    Differentiation with respect to `z` is supported:

    >>> from sympy import diff
    >>> diff(fresnels(z), z)
    sin(pi*z**2/2)

    Defining the Fresnel functions via an integral

    >>> from sympy import integrate, pi, sin, gamma, expand_func
    >>> integrate(sin(pi*z**2/2), z)
    3*fresnels(z)*gamma(3/4)/(4*gamma(7/4))
    >>> expand_func(integrate(sin(pi*z**2/2), z))
    fresnels(z)

    We can numerically evaluate the Fresnel integral to arbitrary precision
    on the whole complex plane:

    >>> fresnels(2).evalf(30)
    0.343415678363698242195300815958
    >>> fresnels(-2*I).evalf(30)
    0.343415678363698242195300815958*I

    See Also
    ========

    fresnelc: Fresnel cosine integral.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Fresnel_integral
    .. [2] http://dlmf.nist.gov/7
    .. [3] http://mathworld.wolfram.com/FresnelIntegrals.html
    .. [4] http://functions.wolfram.com/GammaBetaErf/FresnelS
    .. [5] The converging factors for the fresnel integrals
           by John W. Wrench Jr. and Vicki Alley
    """

    _trigfunc = C.sin
    _sign = -S.One  # fresnels(I*z) = -I*fresnels(z)

    @staticmethod
    @cacheit
    def taylor_term(n, x, *previous_terms):
        # n-th term of the Maclaurin series; uses the previous term as a
        # recurrence when available to avoid recomputing factorials.
        if n < 0:
            return S.Zero
        else:
            x = sympify(x)
            if len(previous_terms) > 1:
                p = previous_terms[-1]
                return (-pi**2*x**4*(4*n - 1)/(8*n*(2*n + 1)*(4*n + 3))) * p
            else:
                return x**3 * (-x**4)**n * (S(2)**(-2*n - 1)*pi**(2*n + 1)) / ((4*n + 3)*C.factorial(2*n + 1))

    def _eval_rewrite_as_erf(self, z):
        return (S.One + I)/4 * (erf((S.One + I)/2*sqrt(pi)*z) - I*erf((S.One - I)/2*sqrt(pi)*z))

    def _eval_rewrite_as_hyper(self, z):
        return pi*z**3/6 * hyper([S(3)/4], [S(3)/2, S(7)/4], -pi**2*z**4/16)

    def _eval_rewrite_as_meijerg(self, z):
        return (pi*z**(S(9)/4) / (sqrt(2)*(z**2)**(S(3)/4)*(-z)**(S(3)/4))
                * meijerg([], [1], [S(3)/4], [S(1)/4, 0], -pi**2*z**4/16))

    def _eval_aseries(self, n, args0, x, logx):
        point = args0[0]

        # Expansion at oo
        if point is S.Infinity:
            z = self.args[0]

            # expansion of S(x) = S1(x*sqrt(pi/2)), see reference[5] page 1-8
            p = [(-1)**k * C.factorial(4*k + 1) /
                 (2**(2*k + 2) * z**(4*k + 3) * 2**(2*k)*C.factorial(2*k))
                 for k in xrange(0, n)]
            q = [1/(2*z)] + [(-1)**k * C.factorial(4*k - 1) /
                 (2**(2*k + 1) * z**(4*k + 1) * 2**(2*k - 1)*C.factorial(2*k - 1))
                 for k in xrange(1, n)]

            p = [-sqrt(2/pi)*t for t in p] + [C.Order(1/z**n, x)]
            q = [-sqrt(2/pi)*t for t in q] + [C.Order(1/z**n, x)]

            return S.Half + (C.sin(z**2)*Add(*p) + C.cos(z**2)*Add(*q)).subs(x, sqrt(2/pi)*x)

        # All other points are not handled
        return super(fresnels, self)._eval_aseries(n, args0, x, logx)
class fresnelc(FresnelIntegral):
    r"""
    Fresnel integral C.

    This function is defined by

    .. math:: \operatorname{C}(z) = \int_0^z \cos{\frac{\pi}{2} t^2} \mathrm{d}t.

    It is an entire function.

    Examples
    ========

    >>> from sympy import I, oo, fresnelc
    >>> from sympy.abc import z

    Several special values are known:

    >>> fresnelc(0)
    0
    >>> fresnelc(oo)
    1/2
    >>> fresnelc(-oo)
    -1/2
    >>> fresnelc(I*oo)
    I/2
    >>> fresnelc(-I*oo)
    -I/2

    In general one can pull out factors of -1 and `i` from the argument:

    >>> fresnelc(-z)
    -fresnelc(z)
    >>> fresnelc(I*z)
    I*fresnelc(z)

    The Fresnel C integral obeys the mirror symmetry
    `\overline{C(z)} = C(\bar{z})`:

    >>> from sympy import conjugate
    >>> conjugate(fresnelc(z))
    fresnelc(conjugate(z))

    Differentiation with respect to `z` is supported:

    >>> from sympy import diff
    >>> diff(fresnelc(z), z)
    cos(pi*z**2/2)

    Defining the Fresnel functions via an integral

    >>> from sympy import integrate, pi, cos, gamma, expand_func
    >>> integrate(cos(pi*z**2/2), z)
    fresnelc(z)*gamma(1/4)/(4*gamma(5/4))
    >>> expand_func(integrate(cos(pi*z**2/2), z))
    fresnelc(z)

    We can numerically evaluate the Fresnel integral to arbitrary precision
    on the whole complex plane:

    >>> fresnelc(2).evalf(30)
    0.488253406075340754500223503357
    >>> fresnelc(-2*I).evalf(30)
    -0.488253406075340754500223503357*I

    See Also
    ========

    fresnels: Fresnel sine integral.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Fresnel_integral
    .. [2] http://dlmf.nist.gov/7
    .. [3] http://mathworld.wolfram.com/FresnelIntegrals.html
    .. [4] http://functions.wolfram.com/GammaBetaErf/FresnelC
    .. [5] The converging factors for the fresnel integrals
           by John W. Wrench Jr. and Vicki Alley
    """

    _trigfunc = C.cos
    _sign = S.One  # fresnelc(I*z) = I*fresnelc(z)

    @staticmethod
    @cacheit
    def taylor_term(n, x, *previous_terms):
        # n-th term of the Maclaurin series; uses the previous term as a
        # recurrence when available to avoid recomputing factorials.
        if n < 0:
            return S.Zero
        else:
            x = sympify(x)
            if len(previous_terms) > 1:
                p = previous_terms[-1]
                return (-pi**2*x**4*(4*n - 3)/(8*n*(2*n - 1)*(4*n + 1))) * p
            else:
                return x * (-x**4)**n * (S(2)**(-2*n)*pi**(2*n)) / ((4*n + 1)*C.factorial(2*n))

    def _eval_rewrite_as_erf(self, z):
        return (S.One - I)/4 * (erf((S.One + I)/2*sqrt(pi)*z) + I*erf((S.One - I)/2*sqrt(pi)*z))

    def _eval_rewrite_as_hyper(self, z):
        return z * hyper([S.One/4], [S.One/2, S(5)/4], -pi**2*z**4/16)

    def _eval_rewrite_as_meijerg(self, z):
        return (pi*z**(S(3)/4) / (sqrt(2)*root(z**2, 4)*root(-z, 4))
                * meijerg([], [1], [S(1)/4], [S(3)/4, 0], -pi**2*z**4/16))

    def _eval_aseries(self, n, args0, x, logx):
        point = args0[0]

        # Expansion at oo
        if point is S.Infinity:
            z = self.args[0]

            # expansion of C(x) = C1(x*sqrt(pi/2)), see reference[5] page 1-8
            p = [(-1)**k * C.factorial(4*k + 1) /
                 (2**(2*k + 2) * z**(4*k + 3) * 2**(2*k)*C.factorial(2*k))
                 for k in xrange(0, n)]
            q = [1/(2*z)] + [(-1)**k * C.factorial(4*k - 1) /
                 (2**(2*k + 1) * z**(4*k + 1) * 2**(2*k - 1)*C.factorial(2*k - 1))
                 for k in xrange(1, n)]

            p = [-sqrt(2/pi)*t for t in p] + [C.Order(1/z**n, x)]
            q = [ sqrt(2/pi)*t for t in q] + [C.Order(1/z**n, x)]

            return S.Half + (C.cos(z**2)*Add(*p) + C.sin(z**2)*Add(*q)).subs(x, sqrt(2/pi)*x)

        # All other points are not handled
        return super(fresnelc, self)._eval_aseries(n, args0, x, logx)
###############################################################################
#################### HELPER FUNCTIONS #########################################
###############################################################################
class _erfs(Function):
    """
    Helper function to make the `\\mathrm{erf}(z)` function
    tractable for the Gruntz algorithm.

    Represents exp(z**2)*erfc(z); see _eval_rewrite_as_intractable.
    """

    def _eval_aseries(self, n, args0, x, logx):
        point = args0[0]

        # Expansion at oo
        if point is S.Infinity:
            z = self.args[0]
            # erfc(z)*exp(z**2) ~ sum_k (2k)!/(k!*(-4)**k*sqrt(pi)) * z**(-2k-1)
            l = [ 1/sqrt(S.Pi) * C.factorial(2*k)*(-S(
                4))**(-k)/C.factorial(k) * (1/z)**(2*k + 1) for k in xrange(0, n) ]
            o = C.Order(1/z**(2*n + 1), x)
            # It is very inefficient to first add the order and then do the nseries
            return (Add(*l))._eval_nseries(x, n, logx) + o

        # Expansion at I*oo
        t = point.extract_multiplicatively(S.ImaginaryUnit)
        if t is S.Infinity:
            z = self.args[0]
            # TODO: is the series really correct?
            l = [ 1/sqrt(S.Pi) * C.factorial(2*k)*(-S(
                4))**(-k)/C.factorial(k) * (1/z)**(2*k + 1) for k in xrange(0, n) ]
            o = C.Order(1/z**(2*n + 1), x)
            # It is very inefficient to first add the order and then do the nseries
            return (Add(*l))._eval_nseries(x, n, logx) + o

        # All other points are not handled
        return super(_erfs, self)._eval_aseries(n, args0, x, logx)

    def fdiff(self, argindex=1):
        # d/dz [exp(z**2)*erfc(z)] = -2/sqrt(pi) + 2*z*_erfs(z)
        if argindex == 1:
            z = self.args[0]
            return -2/sqrt(S.Pi) + 2*z*_erfs(z)
        else:
            raise ArgumentIndexError(self, argindex)

    def _eval_rewrite_as_intractable(self, z):
        # Rewrite back in terms of erf, which Gruntz treats as intractable.
        return (S.One - erf(z))*C.exp(z**2)
class _eis(Function):
    """
    Helper function to make the `\\mathrm{Ei}(z)` and `\\mathrm{li}(z)` functions
    tractable for the Gruntz algorithm.

    Represents exp(-z)*Ei(z); see _eval_rewrite_as_intractable.
    """

    def _eval_aseries(self, n, args0, x, logx):
        # The asymptotic series is only implemented at +oo; defer everywhere
        # else to the generic implementation.
        if args0[0] != S.Infinity:
            # BUGFIX: this used to call super(_erfs, self), copy-pasted from
            # the _erfs helper above, which raises TypeError because `self`
            # is not an instance of _erfs.
            return super(_eis, self)._eval_aseries(n, args0, x, logx)

        z = self.args[0]
        # exp(-z)*Ei(z) ~ sum_k k! / z**(k+1) as z -> oo
        l = [ C.factorial(k) * (1/z)**(k + 1) for k in xrange(0, n) ]
        o = C.Order(1/z**(n + 1), x)
        # It is very inefficient to first add the order and then do the nseries
        return (Add(*l))._eval_nseries(x, n, logx) + o

    def fdiff(self, argindex=1):
        # d/dz [exp(-z)*Ei(z)] = exp(-z)/z - exp(-z)*Ei(z) = 1/z - _eis(z)
        if argindex == 1:
            z = self.args[0]
            return S.One / z - _eis(z)
        else:
            raise ArgumentIndexError(self, argindex)

    def _eval_rewrite_as_intractable(self, z):
        # Rewrite back in terms of Ei, which Gruntz treats as intractable.
        return C.exp(-z)*Ei(z)

    def _eval_nseries(self, x, n, logx):
        # At the origin the helper form is useless; fall back to expanding
        # the intractable representation instead.
        x0 = self.args[0].limit(x, 0)
        if x0 is S.Zero:
            f = self._eval_rewrite_as_intractable(*self.args)
            return f._eval_nseries(x, n, logx)
        return super(_eis, self)._eval_nseries(x, n, logx)
| mit |
Tejal011089/digitales_frappe | frappe/permissions.py | 27 | 8898 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, copy, json
from frappe import _, msgprint
from frappe.utils import cint
# Every permission type ("right") a DocPerm row can grant; this is also the
# set of valid `ptype` values accepted by has_permission() and friends.
rights = ("read", "write", "create", "delete", "submit", "cancel", "amend",
    "print", "email", "report", "import", "export", "set_user_permissions")
def check_admin_or_system_manager(user=None):
    """Raise a PermissionError unless *user* (default: session user) is
    Administrator or holds the System Manager role."""
    user = user or frappe.session.user

    is_admin = user == "Administrator"
    is_system_manager = "System Manager" in frappe.get_roles(user)

    if not (is_admin or is_system_manager):
        frappe.throw(_("Not permitted"), frappe.PermissionError)
def has_permission(doctype, ptype="read", doc=None, verbose=True, user=None):
    """Check whether *user* has permission of type *ptype* on *doctype*.

    If *doc* is given (a Document or a docname string), document-level
    checks (user permissions and controller hooks) are applied as well.
    Returns True/False.
    """
    if not user: user = frappe.session.user

    # child tables inherit access from their parent document
    if frappe.is_table(doctype):
        return True

    meta = frappe.get_meta(doctype)

    # submit/import only make sense if the doctype supports them
    if ptype=="submit" and not cint(meta.is_submittable):
        return False

    if ptype=="import" and not cint(meta.allow_import):
        return False

    # Administrator bypasses role checks (but only after the doctype-level
    # submit/import capability checks above)
    if user=="Administrator":
        return True

    role_permissions = get_role_permissions(meta, user=user)

    if not role_permissions.get(ptype):
        return False

    if doc:
        if isinstance(doc, basestring):
            doc = frappe.get_doc(meta.name, doc)

        # user-permission (record-level) restrictions, only for ptypes that
        # were marked with apply_user_permissions in the role permissions
        if role_permissions["apply_user_permissions"].get(ptype):
            if not user_has_permission(doc, verbose=verbose, user=user,
                user_permission_doctypes=role_permissions.get("user_permission_doctypes")):
                return False

        # finally, the doctype's controller hooks get a veto
        if not has_controller_permissions(doc, ptype, user=user):
            return False

    return True
def get_doc_permissions(doc, verbose=False, user=None):
    """Return the effective permission dict for a specific document.

    Starts from the user's role permissions and switches off every ptype
    that requires user permissions when the document does not match the
    user's User Permissions.
    """
    if not user: user = frappe.session.user

    # child tables inherit access from their parent
    if frappe.is_table(doc.doctype):
        return {"read":1, "write":1}

    meta = frappe.get_meta(doc.doctype)

    # deepcopy so the request-cached role-permission dict is not mutated below
    role_permissions = copy.deepcopy(get_role_permissions(meta, user=user))

    if not cint(meta.is_submittable):
        role_permissions["submit"] = 0

    if not cint(meta.allow_import):
        role_permissions["import"] = 0

    if role_permissions.get("apply_user_permissions") and not user_has_permission(doc, verbose=verbose, user=user,
        user_permission_doctypes=role_permissions.get("user_permission_doctypes")):
        # no user permissions, switch off all user-level permissions
        for ptype in role_permissions:
            if role_permissions["apply_user_permissions"].get(ptype):
                role_permissions[ptype] = 0

    return role_permissions
def get_role_permissions(meta, user=None):
    """Build (and cache per request) the combined permlevel-0 permissions
    that *user*'s roles grant on *meta*'s doctype.

    Returns a frappe._dict mapping each right to 0/1, plus:
    - "apply_user_permissions": {ptype: truthy} for ptypes where every
      granting DocPerm row requested user-permission checks
    - "user_permission_doctypes": list of doctype lists (OR alternatives)
    """
    if not user: user = frappe.session.user
    cache_key = (meta.name, user)

    if not frappe.local.role_permissions.get(cache_key):
        perms = frappe._dict({ "apply_user_permissions": {} })
        user_roles = frappe.get_roles(user)

        for p in meta.permissions:
            if cint(p.permlevel)==0 and (p.role in user_roles):
                for ptype in rights:
                    # a right is granted if any matching DocPerm grants it
                    perms[ptype] = perms.get(ptype, 0) or cint(p.get(ptype))

                    if ptype != "set_user_permissions" and p.get(ptype):
                        # apply_user_permissions only sticks if *every* row
                        # granting this ptype asked for it (AND semantics)
                        perms["apply_user_permissions"][ptype] = (perms["apply_user_permissions"].get(ptype, 1)
                            and p.get("apply_user_permissions"))

                if p.apply_user_permissions:
                    # set user_permission_doctypes in perms
                    user_permission_doctypes = (json.loads(p.user_permission_doctypes)
                        if p.user_permission_doctypes else None)

                    if user_permission_doctypes and user_permission_doctypes not in perms.get("user_permission_doctypes", []):
                        # perms["user_permission_doctypes"] would be a list of list like [["User", "Blog Post"], ["User"]]
                        perms.setdefault("user_permission_doctypes", []).append(user_permission_doctypes)

        # drop ptypes whose apply_user_permissions flag ended up falsy.
        # BUGFIX: iterate over a snapshot (list(...)) because entries are
        # deleted while iterating -- the bare .items() form only worked on
        # Python 2, where items() returned a list.
        for key, value in list(perms.get("apply_user_permissions").items()):
            if not value:
                del perms["apply_user_permissions"][key]

        frappe.local.role_permissions[cache_key] = perms

    return frappe.local.role_permissions[cache_key]
def user_has_permission(doc, verbose=True, user=None, user_permission_doctypes=None):
    """Check *doc* (and all of its child rows) against the user's User
    Permissions.

    *user_permission_doctypes* is a list of alternative doctype sets, e.g.
    [["User", "Blog Post"], ["User"]]; the alternatives are OR-ed together,
    while the link fields within one alternative must all match (AND).
    """
    from frappe.defaults import get_user_permissions
    user_permissions = get_user_permissions(user)
    user_permission_doctypes = get_user_permission_doctypes(user_permission_doctypes, user_permissions)

    def check_user_permission(d):
        # d is either the parent document or a child-table row
        meta = frappe.get_meta(d.get("doctype"))
        end_result = False

        messages = {}

        # check multiple sets of user_permission_doctypes using OR condition
        for doctypes in user_permission_doctypes:
            result = True

            for df in meta.get_fields_to_check_permissions(doctypes):
                if (df.options in user_permissions and d.get(df.fieldname)
                    and d.get(df.fieldname) not in user_permissions[df.options]):
                    result = False

                    if verbose:
                        msg = _("Not allowed to access {0} with {1} = {2}").format(df.options, _(df.label), d.get(df.fieldname))
                        if d.parentfield:
                            # prefix the message with the child row location
                            msg = "{doctype}, {row} #{idx}, ".format(doctype=_(d.doctype),
                                row=_("Row"), idx=d.idx) + msg

                        messages[df.fieldname] = msg

            end_result = end_result or result

        # only surface the collected messages if no alternative matched
        if not end_result and messages:
            for fieldname, msg in messages.items():
                msgprint(msg)

        return end_result

    _user_has_permission = check_user_permission(doc)

    for d in doc.get_all_children():
        # every child row has to pass as well
        _user_has_permission = check_user_permission(d) and _user_has_permission

    return _user_has_permission
def has_controller_permissions(doc, ptype, user=None):
    """Run every `has_permission` hook registered for this doctype; all of
    them must return a truthy value for the check to pass."""
    user = user or frappe.session.user

    hook_methods = frappe.get_hooks("has_permission").get(doc.doctype, [])
    return all(
        frappe.call(frappe.get_attr(method), doc=doc, ptype=ptype, user=user)
        for method in hook_methods)
def can_set_user_permissions(doctype, docname=None):
    """Return True if the session user may set User Permissions on *doctype*
    (optionally for a specific *docname*)."""
    # System Manager can always set user permissions
    if "System Manager" in frappe.get_roles():
        return True

    # the user must at least be able to read the specific document
    if docname and not has_permission(doctype, "read", docname):
        return False

    # and hold a role that grants set_user_permissions on the doctype
    meta = frappe.get_meta(doctype)
    return get_role_permissions(meta).set_user_permissions == 1
def set_user_permission_if_allowed(doctype, name, user, with_message=False):
    # NOTE(review): the `!= 1` test reads inverted relative to the function
    # name -- the permission is added only when the user's roles may NOT set
    # user permissions manually. Confirm the intent against callers before
    # "fixing" it.
    if get_role_permissions(frappe.get_meta(doctype), user).set_user_permissions!=1:
        add_user_permission(doctype, name, user, with_message)
def add_user_permission(doctype, name, user, with_message=False):
    """Grant *user* a User Permission restricting them to document *name*
    of *doctype*. Raises DoesNotExistError if the document does not exist."""
    existing = frappe.defaults.get_user_permissions(user).get(doctype, [])

    if name in existing:
        # already granted; optionally tell the user
        if with_message:
            msgprint(_("Permission already set"))
        return

    if not frappe.db.exists(doctype, name):
        frappe.throw(_("{0} {1} not found").format(_(doctype), name), frappe.DoesNotExistError)

    frappe.defaults.add_default(doctype, name, user, "User Permission")
def remove_user_permission(doctype, name, user, default_value_name=None):
    # User Permissions are stored as DefaultValue rows with parenttype
    # "User Permission"; this removes the matching row(s) for the user.
    frappe.defaults.clear_default(key=doctype, value=name, parent=user, parenttype="User Permission",
        name=default_value_name)
def clear_user_permissions_for_doctype(doctype):
    # Drop all User Permission rows keyed on this doctype, for all users.
    frappe.defaults.clear_default(parenttype="User Permission", key=doctype)
def can_import(doctype, raise_exception=False):
    """Return True if the session user may import documents of *doctype*.

    System Managers can always import; everyone else needs the "import"
    right on the doctype. If *raise_exception* is set, a PermissionError is
    raised instead of returning False.
    """
    return _can_perform(doctype, "import", raise_exception)

def can_export(doctype, raise_exception=False):
    """Return True if the session user may export documents of *doctype*.

    Mirror of can_import() for the "export" right; see there for semantics.
    """
    return _can_perform(doctype, "export", raise_exception)

def _can_perform(doctype, ptype, raise_exception):
    # Shared implementation for can_import/can_export (they differed only
    # in the right being checked and the verb in the error message).
    if not ("System Manager" in frappe.get_roles() or has_permission(doctype, ptype)):
        if raise_exception:
            raise frappe.PermissionError("You are not allowed to {0}: {1}".format(ptype, doctype))
        else:
            return False
    return True
def apply_user_permissions(doctype, ptype, user=None):
    """Return whether user permissions must be applied for the given
    doctype / permission type / user combination."""
    meta = frappe.get_meta(doctype)
    perms = get_role_permissions(meta, user=user)
    flags = perms.get("apply_user_permissions", {})
    return flags.get(ptype)
def get_user_permission_doctypes(user_permission_doctypes, user_permissions):
    """Return the doctype combinations to check user permissions against.

    Returns a list of lists like [["User", "Blog Post"], ["User"]] -- the
    outer list is a set of OR-alternatives. Doctypes without any user
    permission defined are dropped; when no explicit combinations are given,
    all doctypes with user permissions form a single alternative.
    """
    if user_permission_doctypes:
        # keep only doctypes for which user permissions actually exist
        # (the others can never restrict anything)
        user_permission_doctypes = [list(set(doctypes).intersection(set(user_permissions.keys())))
            for doctypes in user_permission_doctypes]
    else:
        # list(...) so the result is a list of lists on Python 3 as well
        user_permission_doctypes = [list(user_permissions.keys())]

    if len(user_permission_doctypes) > 1:
        # OPTIMIZATION
        # if intersection exists, use that to reduce the amount of querying
        # for example, [["Blogger", "Blog Category"], ["Blogger"]] should only
        # search in [["Blogger"]] as the first AND condition becomes redundant
        common = user_permission_doctypes[0]
        # range() instead of the Python-2-only xrange()
        for i in range(1, len(user_permission_doctypes)):
            common = list(set(common).intersection(set(user_permission_doctypes[i])))
            if not common:
                break

        if common:
            # is common one of the user_permission_doctypes sets?
            for doctypes in user_permission_doctypes:
                # are these lists equal?
                if set(common) == set(doctypes):
                    user_permission_doctypes = [common]
                    break

    return user_permission_doctypes
| mit |
vedujoshi/tempest | tempest/api/network/admin/test_external_networks_negative.py | 2 | 2408 | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.network import base
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
class ExternalNetworksAdminNegativeTestJSON(base.BaseAdminNetworkTest):
    """Negative admin tests against the external (public) network."""

    @decorators.attr(type=['negative'])
    @decorators.idempotent_id('d402ae6c-0be0-4d8e-833b-a738895d98d0')
    @testtools.skipUnless(CONF.network.public_network_id,
                          'The public_network_id option must be specified.')
    def test_create_port_with_precreated_floatingip_as_fixed_ip(self):
        # NOTE: External networks can be used to create both floating-ip as
        # well as instance-ip. So, creating an instance-ip with a value of a
        # pre-created floating-ip should be denied.

        # create a floating ip
        body = self.admin_floating_ips_client.create_floatingip(
            floating_network_id=CONF.network.public_network_id)
        created_floating_ip = body['floatingip']
        # release the floating IP even if the assertions below fail
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.admin_floating_ips_client.delete_floatingip,
                        created_floating_ip['id'])
        floating_ip_address = created_floating_ip['floating_ip_address']
        self.assertIsNotNone(floating_ip_address)

        # use the same value of floatingip as fixed-ip to create_port()
        fixed_ips = [{'ip_address': floating_ip_address}]

        # create a port which will internally create an instance-ip;
        # expected to be rejected with 409 Conflict (address already in use)
        self.assertRaises(lib_exc.Conflict,
                          self.admin_ports_client.create_port,
                          network_id=CONF.network.public_network_id,
                          fixed_ips=fixed_ips)
| apache-2.0 |
damngamerz/coala-bears | bears/php/PHPMessDetectorBear.py | 13 | 1820 | from coalib.bearlib.abstractions.Linter import linter
from coalib.settings.Setting import typed_list
from dependency_management.requirements.DistributionRequirement import (
DistributionRequirement)
from dependency_management.requirements.AnyOneOfRequirements import (
AnyOneOfRequirements)
from dependency_management.requirements.ComposerRequirement import (
ComposerRequirement)
# The @linter decorator wires this bear to the `phpmd` executable and parses
# its text output with the regex below (":<line> <message>").
@linter(executable='phpmd',
        output_format='regex',
        output_regex=r':(?P<line>\d+)\s*(?P<message>.*)')
class PHPMessDetectorBear:
    """
    The bear takes a given PHP source code base and looks for several
    potential problems within that source. These problems can be things like:

    - Possible bugs
    - Suboptimal code
    - Overcomplicated expressions
    - Unused parameters, methods, properties
    """

    LANGUAGES = {'PHP'}
    # phpmd can be installed either as a distribution package or via composer.
    REQUIREMENTS = {
        AnyOneOfRequirements(
            [DistributionRequirement(apt_get='phpmd',
                                     dnf='php-phpmd-PHP-PMD',
                                     ),
             ComposerRequirement('phpmd/phpmd'),
             ],
        ),
    }
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    CAN_DETECT = {'Formatting', 'Complexity', 'Unused Code', 'Redundancy',
                  'Variable Misuse'}
    SEE_MORE = 'https://phpmd.org/about.html'

    @staticmethod
    def create_arguments(filename, file, config_file,
                         phpmd_rulesets: typed_list(str)):
        """
        :param phpmd_rulesets:
            A list of rulesets to use for analysis.
            Available rulesets: cleancode, codesize, controversial, design,
            naming, unusedcode.
        """
        # phpmd CLI is: phpmd <file> <report-format> <rulesets>; the 'text'
        # report format matches the output_regex declared above.
        return filename, 'text', ','.join(phpmd_rulesets)
| agpl-3.0 |
stephantul/wavesom | wavesom/wavesom.py | 1 | 6360 | """Code for the Time-dependent Wavesom."""
import json

import cupy as cp
import numpy as np
from tqdm import tqdm

from somber import Som
from somber.components.utilities import expo, linear
class Wavesom(Som):
    """A Time-Dependent SOM.

    The map is trained on concatenated orthographic + phonological vectors
    (lengths orth_len and phon_len) and carries a per-unit activation
    `state` that evolves over time in activate()/converge().
    """

    # Static property names — the attributes (de)serialized by save/load.
    param_names = {'neighborhood',
                   'learning_rate',
                   'map_dimensions',
                   'weights',
                   'data_dimensionality',
                   'lrfunc',
                   'nbfunc',
                   'valfunc',
                   'argfunc',
                   'orth_len',
                   'phon_len'}

    def __init__(self,
                 map_dimensions,
                 data_dimensionality,
                 learning_rate,
                 orth_len,
                 phon_len,
                 lrfunc=expo,
                 nbfunc=expo,
                 neighborhood=None):
        """
        Initialize a Wavesom.

        :param map_dimensions: Dimensions of the map grid.
        :param data_dimensionality: Dimensionality of the input vectors.
        :param learning_rate: The initial learning rate.
        :param orth_len: Length of the orthographic part of an input vector.
        :param phon_len: Length of the phonological part of an input vector.
        :param lrfunc: Learning-rate decay function (e.g. expo).
        :param nbfunc: Neighborhood decay function (e.g. expo).
        :param neighborhood: Initial neighborhood size.
        """
        super().__init__(map_dimensions,
                         data_dimensionality,
                         learning_rate,
                         lrfunc,
                         nbfunc,
                         neighborhood)

        self.orth_len = orth_len
        self.phon_len = phon_len
        # One activation value per map unit; starts fully activated.
        self.state = np.ones(len(self.weights))

    @classmethod
    def load(cls, path, array_type=np):
        """
        Load a Wavesom.

        :param path: The path to the JSON file where the wavesom is stored
        :param array_type: The array type to use.
        :return: A wavesom.
        """
        # NOTE(review): the file handle from open(path) is never closed;
        # consider `with open(path) as f: data = json.load(f)`.
        data = json.load(open(path))

        weights = data['weights']
        weights = array_type.asarray(weights, dtype=np.float32)

        # `linear` must be importable from somber.components.utilities,
        # otherwise loading a model saved with linear decay raises NameError.
        lrfunc = expo if data['lrfunc'] == 'expo' else linear
        nbfunc = expo if data['nbfunc'] == 'expo' else linear

        s = cls(data['map_dimensions'],
                data['data_dimensionality'],
                data['learning_rate'],
                data['orth_len'],
                data['phon_len'],
                lrfunc=lrfunc,
                nbfunc=nbfunc,
                neighborhood=data['neighborhood'])

        s.weights = weights
        s.trained = True

        return s

    def predict_distance_part(self,
                              X,
                              offset,
                              batch_size=1,
                              show_progressbar=False):
        """
        Compute the prediction for part of the weights, specified by an offset.

        :param X: The input data
        :param offset: The offset which is applied to axis 1 of X before
        calculating similarity.
        :return: A matrix containing the distance of each sample to
        each weight.
        """
        # NOTE(review): cp.get_array_module() is called without arguments;
        # presumably it should receive self.weights (or X) to pick cupy vs
        # numpy -- confirm against cupy's API.
        xp = cp.get_array_module()
        batched = self._create_batches(X, batch_size, shuffle_data=False)

        activations = []

        # Compare only against the weight columns that correspond to the
        # supplied slice of the input vector.
        temp_weights = self.weights[:, offset:(offset+X.shape[1])]

        for x in tqdm(batched, disable=not show_progressbar):
            activations.extend(self.distance_function(x, temp_weights)[0])

        activations = xp.asarray(activations, dtype=xp.float32)
        # Batching may have padded the data; truncate to the real sample count.
        activations = activations[:X.shape[0]]
        return activations.reshape(X.shape[0], self.weight_dim)

    def predict_part(self, X, offset, vec_length=0):
        """
        Predict BMUs based on part of the weights.

        :param X: The input data.
        :param offset: The offset which is applied to axis 1 of X before
        calculating similarity.
        :param vec_length: If non-zero, only the first vec_length columns of
        X are used for the comparison.
        :return: The index of the best matching unit for each sample.
        """
        if vec_length:
            X = X[:, :vec_length]

        dist = self.predict_distance_part(X, offset)
        # NOTE(review): conventional spelling would be
        # getattr(dist, self.argfunc)(axis=1); confirm that the array type
        # used here actually exposes __getattr__.
        return dist.__getattr__(self.argfunc)(axis=1)

    def statify(self):
        """Extract the current state vector as an exemplar.

        Returns the state-weighted mean of all weight vectors.
        """
        p = (self.weights * self.state[:, None]).mean(0)
        return p

    def activation_function(self, x):
        """
        Generate an activation given some input.

        The activation function returns an n-dimensional vector between 0
        and 1, where values closer to 1 imply more similarity.

        :param x: The input datum.
        :return: An activation.
        """
        # exp(-distance) maps distances into (0, 1]; subtracting
        # (mean + std) recentres so only the most similar units stay positive.
        # NOTE(review): after recentering, values can be negative, which
        # contradicts the "between 0 and 1" wording above.
        x = np.exp(-np.squeeze(self.predict_distance_part(x[None, :], 0)))
        x -= (x.mean() + x.std())

        return x

    def converge(self, x, max_iter=1000, tol=0.001):
        """
        Run activations until convergence.

        Convergence is specified as the point when the difference between
        the state vector in the current step and the previous step is closer
        than the tolerance.

        :param x: The input.
        :param max_iter: The maximum iterations to run for.
        :param tol: The tolerance threshold.
        :return: A 2D array, containing the states the system moved through
        while converging.
        """
        output = []

        for idx in range(max_iter):
            s = self.activate(x, iterations=1)
            # compare the summed difference with the previous state
            if idx != 0 and np.abs(np.sum(s[0] - output[-1])) < tol:
                break
            output.append(np.squeeze(s))

        output = np.array(output)
        # guarantee a 2D result even if we broke out after one step
        if output.ndim == 1:
            return output[None, :]

        return output

    def activate(self, x=None, iterations=20):
        """
        Activate the network for a number of iterations.

        :param x: The input, can be None, in which case the systrm oscillates.
        :param iterations: The number of iterations for which to run.
        :return: A 2D array, containing the states the system moved through
        """
        if x is None:
            # no external input: the network runs on its own state
            x = np.zeros((len(self.weights)))
        else:
            x = self.activation_function(x)

        output = []

        for idx in range(iterations):
            # feedback term: activation of the current state exemplar
            p = self.activation_function(self.statify())

            delta = x + p

            pos = delta >= 0
            neg = delta < 0

            # The ceiling is set at 2.0
            # This term ensures that updates get smaller as
            # activation approaches the ceiling.
            ceiling = (1.0 - (self.state[pos] / 2.))

            # Do dampening.
            self.state[pos] += delta[pos] * ceiling
            self.state[neg] += delta[neg] * self.state[neg]

            output.append(np.copy(self.state))

        return np.array(output)
| mit |
kevclarx/ansible | lib/ansible/modules/monitoring/monit.py | 49 | 7071 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Darryl Stoflet <stoflet@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible module metadata (schema version 1.0).
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

# YAML documentation rendered by `ansible-doc monit`. Indentation restored
# (the YAML structure had been whitespace-mangled) and the "the the" typo
# fixed in the timeout description.
DOCUMENTATION = '''
---
module: monit
short_description: Manage the state of a program monitored via Monit
description:
     - Manage the state of a program monitored via I(Monit)
version_added: "1.2"
options:
  name:
    description:
      - The name of the I(monit) program/process to manage
    required: true
    default: null
  state:
    description:
      - The state of service
    required: true
    default: null
    choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
  timeout:
    description:
      - If there are pending actions for the service monitored by monit, then Ansible will check
        for up to this many seconds to verify that the requested action has been performed.
        Ansible will sleep for five seconds between each check.
    required: false
    default: 300
    version_added: "2.1"
requirements: [ ]
author: "Darryl Stoflet (@dstoflet)"
'''

# Usage examples shown by `ansible-doc monit`.
EXAMPLES = '''
# Manage the state of program "httpd" to be in "started" state.
- monit:
    name: httpd
    state: started
'''
import time
def main():
    """Entry point for the monit Ansible module.

    Reconciles the monit-monitored process named by ``name`` with the
    requested ``state`` and always exits through module.exit_json() /
    module.fail_json().
    """
    arg_spec = dict(
        name=dict(required=True),
        timeout=dict(default=300, type='int'),
        state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
    )

    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)

    name = module.params['name']
    state = module.params['state']
    timeout = module.params['timeout']

    MONIT = module.get_bin_path('monit', True)

    def status():
        """Return the status of the process in monit, or the empty string if not present."""
        rc, out, err = module.run_command('%s summary' % MONIT, check_rc=True)
        for line in out.split('\n'):
            # Sample output lines:
            # Process 'name'    Running
            # Process 'name'    Running - restart pending
            parts = line.split()
            if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name:
                return ' '.join(parts[2:]).lower()
        return ''

    def run_command(command):
        """Run a monit command against the managed process and return the new status."""
        module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True)
        return status()

    def wait_for_monit_to_stop_pending():
        """Fail this run if the status stays absent, pending or initializing
        for more than ``timeout`` seconds (polling every five seconds)."""
        timeout_time = time.time() + timeout
        sleep_time = 5

        running_status = status()
        while running_status == '' or 'pending' in running_status or 'initializing' in running_status:
            if time.time() >= timeout_time:
                module.fail_json(
                    msg='waited too long for "pending", or "initializing" status to go away ({0})'.format(
                        running_status
                    ),
                    state=state
                )
            time.sleep(sleep_time)
            running_status = status()

    if state == 'reloaded':
        if module.check_mode:
            module.exit_json(changed=True)
        rc, out, err = module.run_command('%s reload' % MONIT)
        if rc != 0:
            module.fail_json(msg='monit reload failed', stdout=out, stderr=err)
        wait_for_monit_to_stop_pending()
        module.exit_json(changed=True, name=name, state=state)

    present = status() != ''

    if not present and not state == 'present':
        module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state)

    if state == 'present':
        if not present:
            if module.check_mode:
                module.exit_json(changed=True)
            # BUGFIX: the result was previously bound to a local named
            # `status`, shadowing the status() helper and breaking the
            # wait_for_monit_to_stop_pending() call below with a TypeError.
            process_status = run_command('reload')
            if process_status == '':
                wait_for_monit_to_stop_pending()
            module.exit_json(changed=True, name=name, state=state)
        module.exit_json(changed=False, name=name, state=state)

    wait_for_monit_to_stop_pending()
    running = 'running' in status()

    if running and state in ['started', 'monitored']:
        module.exit_json(changed=False, name=name, state=state)

    if running and state == 'stopped':
        if module.check_mode:
            module.exit_json(changed=True)
        process_status = run_command('stop')
        if process_status in ['not monitored'] or 'stop pending' in process_status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not stopped' % name, status=process_status)

    if running and state == 'unmonitored':
        if module.check_mode:
            module.exit_json(changed=True)
        process_status = run_command('unmonitor')
        if process_status in ['not monitored'] or 'unmonitor pending' in process_status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not unmonitored' % name, status=process_status)

    elif state == 'restarted':
        if module.check_mode:
            module.exit_json(changed=True)
        process_status = run_command('restart')
        if process_status in ['initializing', 'running'] or 'restart pending' in process_status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not restarted' % name, status=process_status)

    elif not running and state == 'started':
        if module.check_mode:
            module.exit_json(changed=True)
        process_status = run_command('start')
        if process_status in ['initializing', 'running'] or 'start pending' in process_status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not started' % name, status=process_status)

    elif not running and state == 'monitored':
        if module.check_mode:
            module.exit_json(changed=True)
        process_status = run_command('monitor')
        if process_status not in ['not monitored']:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not monitored' % name, status=process_status)

    module.exit_json(changed=False, name=name, state=state)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
rotofly/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/AddAttachment.py | 384 | 11148 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import os
import uno
import unohelper
import xmlrpclib
import base64
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.error import ErrorDialog
from lib.tools import *
from LoginTest import *
from lib.rpc import *
database="test"
uid = 3
class AddAttachment(unohelper.Base, XJobExecutor ):
    """OpenOffice job that uploads the current document to an OpenERP
    server as an ``ir.attachment`` record, either as PDF or as the native
    OpenOffice file.

    NOTE(review): Python 2 only (uses ``<>`` and ``map`` on lists); runs
    inside the OpenOffice scripting environment (uno/unohelper).
    """

    # Label shown in the dialog -> file extension used for the attachment.
    Kind = {
        'PDF' : 'pdf',
        'OpenOffice': 'sxw',
    }

    def __init__(self, ctx):
        """Build and show the "Add Attachment" dialog.

        ctx: UNO component context handed in by OpenOffice.
        """
        self.ctx = ctx
        self.module = "openerp_report"
        self.version = "0.1"
        LoginTest()
        # Abort when running as a packaged extension without a valid login.
        if not loginstatus and __name__=="package":
            exit(1)
        self.aSearchResult = []
        desktop=getDesktop()
        oDoc2 = desktop.getCurrentComponent()
        docinfo=oDoc2.getDocumentInfo()
        global passwd
        self.password = passwd
        global url
        self.sock=RPCSession(url)
        # Document user fields 2/3 hold the id/model of an already-linked
        # record; when both are set only the resource type must be asked.
        if docinfo.getUserFieldValue(2) <> "" and docinfo.getUserFieldValue(3) <> "":
            self.win = DBModalDialog(60, 50, 180, 70, "Add Attachment to Server")
            self.win.addFixedText("lblResourceType", 2 , 5, 100, 10, "Select Appropriate Resource Type:")
            self.win.addComboListBox("lstResourceType", -2, 25, 176, 15,True)
            self.win.addButton('btnOkWithoutInformation', -2 , -5, 25 , 15,'OK' ,actionListenerProc = self.btnOkWithoutInformation_clicked )
        else:
            # No linked record yet: show the full dialog with model
            # selection and a record search box.
            self.win = DBModalDialog(60, 50, 180, 190, "Add Attachment to Server")
            self.win.addFixedText("lblModuleName",2 , 9, 42, 20, "Select Module:")
            self.win.addComboListBox("lstmodel", -2, 5, 134, 15,True)
            self.lstModel = self.win.getControl( "lstmodel" )
            self.dModel = {}
            # Open a new connexion to the server
            ids = self.sock.execute(database, uid, self.password, 'ir.module.module', 'search', [('name','=','base_report_model'),('state', '=', 'installed')])
            if not len(ids):
                # If the module 'base_report_model' is not installed, use the default model
                self.dModel = {
                    "Partner":'res.partner',
                }
            else:
                # Build a display-name -> model-name map from the
                # base.report.model records declared on the server.
                ids =self.sock.execute(database, uid, self.password, 'base.report.model' , 'search', [])
                res = self.sock.execute(database, uid, self.password, 'base.report.model' , 'read', ids, ['name','model_id'])
                models = self.sock.execute(database, uid, self.password, 'ir.model' , 'read', map(lambda x:x['model_id'][0], res), ['model'])
                models = dict(map(lambda x:(x['id'],x['model']), models))
                self.dModel = dict(map(lambda x: (x['name'],models[x['model_id'][0]]), res))
            for item in self.dModel.keys():
                self.lstModel.addItem(item, self.lstModel.getItemCount())
            self.win.addFixedText("lblSearchName",2 , 25, 60, 10, "Enter Search String:")
            self.win.addEdit("txtSearchName", 2, 35, 149, 15,)
            self.win.addButton('btnSearch', -2 , 35, 25 , 15,'Search' ,actionListenerProc = self.btnSearch_clicked )
            self.win.addFixedText("lblSearchRecord", 2 , 55, 60, 10, "Search Result:")
            self.win.addComboListBox("lstResource", -2, 65, 176, 70, False )
            self.lstResource = self.win.getControl( "lstResource" )
            self.win.addFixedText("lblResourceType", 2 , 137, 100, 20, "Select Appropriate Resource Type:")
            self.win.addComboListBox("lstResourceType", -2, 147, 176, 15,True )
            self.win.addButton('btnOkWithInformation', -2 , -5, 25 , 15,'OK' ,actionListenerProc = self.btnOkWithInformation_clicked )
        # Common to both dialog variants: resource-type list, cancel
        # button, then run the dialog modally.
        self.lstResourceType = self.win.getControl( "lstResourceType" )
        for kind in self.Kind.keys():
            self.lstResourceType.addItem( kind, self.lstResourceType.getItemCount() )
        self.win.addButton('btnCancel', -2 - 27 , -5 , 30 , 15, 'Cancel' ,actionListenerProc = self.btnCancel_clicked )
        self.win.doModalDialog("lstResourceType", self.Kind.keys()[0])

    def btnSearch_clicked(self, oActionEvent):
        """Run name_search on the selected model and refill the result list."""
        modelSelectedItem = self.win.getListBoxSelectedItem("lstmodel")
        if modelSelectedItem == "":
            return
        desktop=getDesktop()
        oDoc2 = desktop.getCurrentComponent()
        docinfo=oDoc2.getDocumentInfo()
        # name_search returns a list of (id, display_name) pairs.
        self.aSearchResult =self.sock.execute( database, uid, self.password, self.dModel[modelSelectedItem], 'name_search', self.win.getEditText("txtSearchName"))
        self.win.removeListBoxItems("lstResource", 0, self.win.getListBoxItemCount("lstResource"))
        if self.aSearchResult == []:
            ErrorDialog("No search result found.", "", "Search Error.")
            return
        for result in self.aSearchResult:
            self.lstResource.addItem(result[1],result[0])

    def _send_attachment(self, name, data, res_model, res_id):
        """Create an ir.attachment record on the server; returns the new id."""
        desktop = getDesktop()
        oDoc2 = desktop.getCurrentComponent()
        docinfo = oDoc2.getDocumentInfo()
        params = {
            'name': name,
            'datas': base64.encodestring( data ),
            'datas_fname': name,
            'res_model' : res_model,
            'res_id' : int(res_id),
        }
        return self.sock.execute( database, uid, self.password, 'ir.attachment', 'create', params )

    def send_attachment(self, model, resource_id):
        """Export the current document (converting to PDF if requested)
        and attach it to the given model/record. Returns the attachment
        id, or None on error."""
        desktop = getDesktop()
        oDoc2 = desktop.getCurrentComponent()
        docinfo = oDoc2.getDocumentInfo()
        if oDoc2.getURL() == "":
            ErrorDialog("You should save your file.", "", "Saving Error.")
            return None
        url = oDoc2.getURL()
        if self.Kind[self.win.getListBoxSelectedItem("lstResourceType")] == "pdf":
            # url[7:] strips the "file://" scheme prefix.
            url = self.doc2pdf(url[7:])
            if url == None:
                ErrorDialog( "Problem in creating PDF.", "", "PDF Error.")
                return None
        url = url[7:]
        data = read_data_from_file( get_absolute_file_path( url ) )
        return self._send_attachment( os.path.basename( url ), data, model, resource_id )

    def btnOkWithoutInformation_clicked(self, oActionEvent):
        """OK handler for the short dialog: model/record come from the
        document's user fields."""
        desktop = getDesktop()
        oDoc2 = desktop.getCurrentComponent()
        docinfo = oDoc2.getDocumentInfo()
        if self.win.getListBoxSelectedItem("lstResourceType") == "":
            ErrorDialog("You have to select a resource type.", "", "Selection Error." )
            return
        res = self.send_attachment( docinfo.getUserFieldValue(3), docinfo.getUserFieldValue(2) )
        self.win.endExecute()

    def btnOkWithInformation_clicked(self, oActionEvent):
        """OK handler for the full dialog: model/record come from the
        user's selection in the search result list."""
        if self.win.getListBoxSelectedItem("lstResourceType") == "":
            ErrorDialog( "You have to select a resource type.", "", "Selection Error." )
            return
        if self.win.getListBoxSelectedItem("lstResource") == "" or self.win.getListBoxSelectedItem("lstmodel") == "":
            ErrorDialog("You have to select Model and Resource.", "", "Selection Error.")
            return
        # Map the selected display name back to its record id.
        resourceid = None
        for s in self.aSearchResult:
            if s[1] == self.win.getListBoxSelectedItem("lstResource"):
                resourceid = s[0]
                break
        if resourceid == None:
            ErrorDialog("No resource is selected.", "", "Resource Error." )
            return
        res = self.send_attachment( self.dModel[self.win.getListBoxSelectedItem('lstmodel')], resourceid )
        self.win.endExecute()

    def btnCancel_clicked(self, oActionEvent):
        """Close the dialog without attaching anything."""
        self.win.endExecute()

    def doc2pdf(self, strFile):
        """Export strFile to a PDF next to the original and return the
        resulting file URL, or None when no suitable export filter exists."""
        oDoc = None
        strFilterSubName = ''
        strUrl = convertToURL( strFile )
        desktop = getDesktop()
        # Load the document hidden so no window flashes up during export.
        oDoc = desktop.loadComponentFromURL( strUrl, "_blank", 0, Array(self._MakePropertyValue("Hidden",True)))
        if oDoc:
            strFilterSubName = ""
            # select appropriate filter
            if oDoc.supportsService("com.sun.star.presentation.PresentationDocument"):
                strFilterSubName = "impress_pdf_Export"
            elif oDoc.supportsService("com.sun.star.sheet.SpreadsheetDocument"):
                strFilterSubName = "calc_pdf_Export"
            elif oDoc.supportsService("com.sun.star.text.WebDocument"):
                strFilterSubName = "writer_web_pdf_Export"
            elif oDoc.supportsService("com.sun.star.text.GlobalDocument"):
                strFilterSubName = "writer_globaldocument_pdf_Export"
            elif oDoc.supportsService("com.sun.star.text.TextDocument"):
                strFilterSubName = "writer_pdf_Export"
            elif oDoc.supportsService("com.sun.star.drawing.DrawingDocument"):
                strFilterSubName = "draw_pdf_Export"
            elif oDoc.supportsService("com.sun.star.formula.FormulaProperties"):
                strFilterSubName = "math_pdf_Export"
            elif oDoc.supportsService("com.sun.star.chart.ChartDocument"):
                strFilterSubName = "chart_pdf_Export"
            else:
                pass
            filename = len(strFilterSubName) > 0 and convertToURL( os.path.splitext( strFile )[0] + ".pdf" ) or None
            if len(strFilterSubName) > 0:
                oDoc.storeToURL( filename, Array(self._MakePropertyValue("FilterName", strFilterSubName ),self._MakePropertyValue("CompressMode", "1" )))
            oDoc.close(True)
            # Can be None if len(strFilterSubName) <= 0
            return filename

    def _MakePropertyValue(self, cName="", uValue=u""):
        """Build a com.sun.star.beans.PropertyValue struct (name/value pair)."""
        oPropertyValue = createUnoStruct( "com.sun.star.beans.PropertyValue" )
        if cName:
            oPropertyValue.Name = cName
        if uValue:
            oPropertyValue.Value = uValue
        return oPropertyValue
if __name__<>"package" and __name__=="__main__":
AddAttachment(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( AddAttachment, "org.openoffice.openerp.report.addattachment", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
planetarymike/IDL-Colorbars | IDL_py_test/031_Peppermint.py | 1 | 7852 | from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[0., 0., 0.313725],
[0., 0., 0.313725],
[0., 0., 0.627451],
[0., 0., 0.941176],
[0.313725, 0., 0.],
[0.313725, 0., 0.313725],
[0.313725, 0., 0.627451],
[0.313725, 0., 0.941176],
[0.627451, 0., 0.],
[0.627451, 0., 0.313725],
[0.627451, 0., 0.627451],
[0.627451, 0., 0.941176],
[0.941176, 0., 0.],
[0.941176, 0., 0.313725],
[0.941176, 0., 0.627451],
[0.941176, 0., 0.941176],
[0., 0.0627451, 0.],
[0., 0.0627451, 0.313725],
[0., 0.0627451, 0.627451],
[0., 0.0627451, 0.941176],
[0.313725, 0.0627451, 0.],
[0.313725, 0.0627451, 0.313725],
[0.313725, 0.0627451, 0.627451],
[0.313725, 0.0627451, 0.941176],
[0.627451, 0.0627451, 0.],
[0.627451, 0.0627451, 0.313725],
[0.627451, 0.0627451, 0.627451],
[0.627451, 0.0627451, 0.941176],
[0.941176, 0.0627451, 0.],
[0.941176, 0.0627451, 0.313725],
[0.941176, 0.0627451, 0.627451],
[0.941176, 0.0627451, 0.941176],
[0., 0.12549, 0.],
[0., 0.12549, 0.313725],
[0., 0.12549, 0.627451],
[0., 0.12549, 0.941176],
[0.313725, 0.12549, 0.],
[0.313725, 0.12549, 0.313725],
[0.313725, 0.12549, 0.627451],
[0.313725, 0.12549, 0.941176],
[0.627451, 0.12549, 0.],
[0.627451, 0.12549, 0.313725],
[0.627451, 0.12549, 0.627451],
[0.627451, 0.12549, 0.941176],
[0.941176, 0.12549, 0.],
[0.941176, 0.12549, 0.313725],
[0.941176, 0.12549, 0.627451],
[0.941176, 0.12549, 0.941176],
[0., 0.188235, 0.],
[0., 0.188235, 0.313725],
[0., 0.188235, 0.627451],
[0., 0.188235, 0.941176],
[0.313725, 0.188235, 0.],
[0.313725, 0.188235, 0.313725],
[0.313725, 0.188235, 0.627451],
[0.313725, 0.188235, 0.941176],
[0.627451, 0.188235, 0.],
[0.627451, 0.188235, 0.313725],
[0.627451, 0.188235, 0.627451],
[0.627451, 0.188235, 0.941176],
[0.941176, 0.188235, 0.],
[0.941176, 0.188235, 0.313725],
[0.941176, 0.188235, 0.627451],
[0.941176, 0.188235, 0.941176],
[0., 0.25098, 0.],
[0., 0.25098, 0.313725],
[0., 0.25098, 0.627451],
[0., 0.25098, 0.941176],
[0.313725, 0.25098, 0.],
[0.313725, 0.25098, 0.313725],
[0.313725, 0.25098, 0.627451],
[0.313725, 0.25098, 0.941176],
[0.627451, 0.25098, 0.],
[0.627451, 0.25098, 0.313725],
[0.627451, 0.25098, 0.627451],
[0.627451, 0.25098, 0.941176],
[0.941176, 0.25098, 0.],
[0.941176, 0.25098, 0.313725],
[0.941176, 0.25098, 0.627451],
[0.941176, 0.25098, 0.941176],
[0., 0.313725, 0.],
[0., 0.313725, 0.313725],
[0., 0.313725, 0.627451],
[0., 0.313725, 0.941176],
[0.313725, 0.313725, 0.],
[0.313725, 0.313725, 0.313725],
[0.313725, 0.313725, 0.627451],
[0.313725, 0.313725, 0.941176],
[0.627451, 0.313725, 0.],
[0.627451, 0.313725, 0.313725],
[0.627451, 0.313725, 0.627451],
[0.627451, 0.313725, 0.941176],
[0.941176, 0.313725, 0.],
[0.941176, 0.313725, 0.313725],
[0.941176, 0.313725, 0.627451],
[0.941176, 0.313725, 0.941176],
[0., 0.376471, 0.],
[0., 0.376471, 0.313725],
[0., 0.376471, 0.627451],
[0., 0.376471, 0.941176],
[0.313725, 0.376471, 0.],
[0.313725, 0.376471, 0.313725],
[0.313725, 0.376471, 0.627451],
[0.313725, 0.376471, 0.941176],
[0.627451, 0.376471, 0.],
[0.627451, 0.376471, 0.313725],
[0.627451, 0.376471, 0.627451],
[0.627451, 0.376471, 0.941176],
[0.941176, 0.376471, 0.],
[0.941176, 0.376471, 0.313725],
[0.941176, 0.376471, 0.627451],
[0.941176, 0.376471, 0.941176],
[0., 0.439216, 0.],
[0., 0.439216, 0.313725],
[0., 0.439216, 0.627451],
[0., 0.439216, 0.941176],
[0.313725, 0.439216, 0.],
[0.313725, 0.439216, 0.313725],
[0.313725, 0.439216, 0.627451],
[0.313725, 0.439216, 0.941176],
[0.627451, 0.439216, 0.],
[0.627451, 0.439216, 0.313725],
[0.627451, 0.439216, 0.627451],
[0.627451, 0.439216, 0.941176],
[0.941176, 0.439216, 0.],
[0.941176, 0.439216, 0.313725],
[0.941176, 0.439216, 0.627451],
[0.941176, 0.439216, 0.941176],
[0., 0.501961, 0.],
[0., 0.501961, 0.313725],
[0., 0.501961, 0.627451],
[0., 0.501961, 0.941176],
[0.313725, 0.501961, 0.],
[0.313725, 0.501961, 0.313725],
[0.313725, 0.501961, 0.627451],
[0.313725, 0.501961, 0.941176],
[0.627451, 0.501961, 0.],
[0.627451, 0.501961, 0.313725],
[0.627451, 0.501961, 0.627451],
[0.627451, 0.501961, 0.941176],
[0.941176, 0.501961, 0.],
[0.941176, 0.501961, 0.313725],
[0.941176, 0.501961, 0.627451],
[0.941176, 0.501961, 0.941176],
[0., 0.564706, 0.],
[0., 0.564706, 0.313725],
[0., 0.564706, 0.627451],
[0., 0.564706, 0.941176],
[0.313725, 0.564706, 0.],
[0.313725, 0.564706, 0.313725],
[0.313725, 0.564706, 0.627451],
[0.313725, 0.564706, 0.941176],
[0.627451, 0.564706, 0.],
[0.627451, 0.564706, 0.313725],
[0.627451, 0.564706, 0.627451],
[0.627451, 0.564706, 0.941176],
[0.941176, 0.564706, 0.],
[0.941176, 0.564706, 0.313725],
[0.941176, 0.564706, 0.627451],
[0.941176, 0.564706, 0.941176],
[0., 0.627451, 0.],
[0., 0.627451, 0.313725],
[0., 0.627451, 0.627451],
[0., 0.627451, 0.941176],
[0.313725, 0.627451, 0.],
[0.313725, 0.627451, 0.313725],
[0.313725, 0.627451, 0.627451],
[0.313725, 0.627451, 0.941176],
[0.627451, 0.627451, 0.],
[0.627451, 0.627451, 0.313725],
[0.627451, 0.627451, 0.627451],
[0.627451, 0.627451, 0.941176],
[0.941176, 0.627451, 0.],
[0.941176, 0.627451, 0.313725],
[0.941176, 0.627451, 0.627451],
[0.941176, 0.627451, 0.941176],
[0., 0.690196, 0.],
[0., 0.690196, 0.313725],
[0., 0.690196, 0.627451],
[0., 0.690196, 0.941176],
[0.313725, 0.690196, 0.],
[0.313725, 0.690196, 0.313725],
[0.313725, 0.690196, 0.627451],
[0.313725, 0.690196, 0.941176],
[0.627451, 0.690196, 0.],
[0.627451, 0.690196, 0.313725],
[0.627451, 0.690196, 0.627451],
[0.627451, 0.690196, 0.941176],
[0.941176, 0.690196, 0.],
[0.941176, 0.690196, 0.313725],
[0.941176, 0.690196, 0.627451],
[0.941176, 0.690196, 0.941176],
[0., 0.752941, 0.],
[0., 0.752941, 0.313725],
[0., 0.752941, 0.627451],
[0., 0.752941, 0.941176],
[0.313725, 0.752941, 0.],
[0.313725, 0.752941, 0.313725],
[0.317647, 0.756863, 0.627451],
[0.313725, 0.752941, 0.941176],
[0.627451, 0.752941, 0.],
[0.627451, 0.752941, 0.313725],
[0.627451, 0.752941, 0.627451],
[0.627451, 0.752941, 0.941176],
[0.941176, 0.752941, 0.],
[0.941176, 0.752941, 0.313725],
[0.941176, 0.752941, 0.627451],
[0.941176, 0.752941, 0.941176],
[0., 0.815686, 0.],
[0., 0.815686, 0.313725],
[0., 0.815686, 0.627451],
[0., 0.815686, 0.941176],
[0.313725, 0.815686, 0.],
[0.313725, 0.815686, 0.313725],
[0.313725, 0.815686, 0.627451],
[0.313725, 0.815686, 0.941176],
[0.627451, 0.815686, 0.],
[0.627451, 0.815686, 0.313725],
[0.627451, 0.815686, 0.627451],
[0.627451, 0.815686, 0.941176],
[0.941176, 0.815686, 0.],
[0.941176, 0.815686, 0.313725],
[0.941176, 0.815686, 0.627451],
[0.941176, 0.815686, 0.941176],
[0., 0.878431, 0.],
[0., 0.878431, 0.313725],
[0., 0.878431, 0.627451],
[0., 0.878431, 0.941176],
[0.313725, 0.878431, 0.],
[0.313725, 0.878431, 0.313725],
[0.313725, 0.878431, 0.627451],
[0.313725, 0.878431, 0.941176],
[0.627451, 0.878431, 0.],
[0.627451, 0.878431, 0.313725],
[0.627451, 0.878431, 0.627451],
[0.627451, 0.878431, 0.941176],
[0.941176, 0.878431, 0.],
[0.941176, 0.878431, 0.313725],
[0.941176, 0.878431, 0.627451],
[0.941176, 0.878431, 0.941176],
[0., 0.941176, 0.],
[0., 0.941176, 0.313725],
[0., 0.941176, 0.627451],
[0., 0.941176, 0.941176],
[0.313725, 0.941176, 0.],
[0.313725, 0.941176, 0.313725],
[0.313725, 0.941176, 0.627451],
[0.313725, 0.941176, 0.941176],
[0.627451, 0.941176, 0.],
[0.627451, 0.941176, 0.313725],
[0.627451, 0.941176, 0.627451],
[0.627451, 0.941176, 0.941176],
[0.941176, 0.941176, 0.],
[0.941176, 0.941176, 0.313725],
[0.941176, 0.941176, 0.627451],
[0.941176, 0.941176, 0.627451]]
# Build the matplotlib colormap from the 256-entry RGB table above.
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)

if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import numpy as np

    try:
        # Prefer the viscm perceptual-analysis view when it is installed.
        from pycam02ucs.cm.viscm import viscm
        viscm(test_cm)
    except ImportError:
        # Fallback: just render a horizontal gradient with the colormap.
        print("pycam02ucs not found, falling back on simple display")
        plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
                   cmap=test_cm)
    plt.show()
| gpl-2.0 |
levilucio/SyVOLT | GM2AUTOSAR_MM/MT_pre__MetaModelElement_T.py | 1 | 5208 | """
__MT_pre__MetaModelElement_T.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: levi
Modified: Sun Aug 9 23:45:38 2015
____________________________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from ATOM3Text import *
from ATOM3String import *
from ATOM3Boolean import *
from graph_MT_pre__MetaModelElement_T import *
class MT_pre__MetaModelElement_T(ASGNode, ATOM3Type):
    """AToM3-generated pre-condition (LHS) pattern node for the
    MetaModelElement_T rule element. Auto-generated code: do not modify
    by hand (see the file header)."""

    def __init__(self, parent = None):
        ASGNode.__init__(self)
        ATOM3Type.__init__(self)
        self.superTypes = []
        self.graphClass_ = graph_MT_pre__MetaModelElement_T
        self.isGraphObjectVisual = True
        if(hasattr(self, '_setHierarchicalLink')):
            self._setHierarchicalLink(False)
        if(hasattr(self, '_setHierarchicalNode')):
            self._setHierarchicalNode(False)
        self.parent = parent
        # Python constraint evaluated when matching a node against this
        # rule element; the default body simply returns True.
        self.MT_pre__attr1=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        # Matching label, pivot names and subtype-matching flag.
        self.MT_label__=ATOM3String('', 20)
        self.MT_pivotOut__=ATOM3String('', 20)
        self.MT_pivotIn__=ATOM3String('', 20)
        self.MT_subtypeMatching__=ATOM3Boolean()
        self.MT_subtypeMatching__.setValue(('True', 0))
        self.MT_subtypeMatching__.config = 0
        # Attribute-name -> type metadata used by the AToM3 framework.
        self.generatedAttributes = {'MT_pre__attr1': ('ATOM3Text', ),
                                    'MT_label__': ('ATOM3String', ),
                                    'MT_pivotOut__': ('ATOM3String', ),
                                    'MT_pivotIn__': ('ATOM3String', ),
                                    'MT_subtypeMatching__': ('ATOM3Boolean', ) }
        self.realOrder = ['MT_pre__attr1','MT_label__','MT_pivotOut__','MT_pivotIn__','MT_subtypeMatching__']
        self.directEditing = [0,1,1,1,1]

    def clone(self):
        """Return a deep-ish copy: each attribute value is cloned, then
        ASGNode clone actions are applied."""
        cloneObject = MT_pre__MetaModelElement_T( self.parent )
        for atr in self.realOrder:
            cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
        ASGNode.cloneActions(self, cloneObject)
        return cloneObject

    def copy(self, other):
        """Copy all attribute values from `other` into this instance."""
        ATOM3Type.copy(self, other)
        for atr in self.realOrder:
            self.setAttrValue(atr, other.getAttrValue(atr) )
        ASGNode.copy(self, other)

    def preCondition (self, actionID, * params):
        # Delegate to the graphical object when one is attached.
        if self.graphObject_:
            return self.graphObject_.preCondition(actionID, params)
        else: return None

    def postCondition (self, actionID, * params):
        # Delegate to the graphical object when one is attached.
        if self.graphObject_:
            return self.graphObject_.postCondition(actionID, params)
        else: return None

    def preAction (self, actionID, * params):
        # On creation, give this element the next free MT_label__.
        if actionID == self.CREATE:
            self.autoIncrLabel(params)
        if self.graphObject_:
            return self.graphObject_.preAction(actionID, params)
        else: return None

    def postAction (self, actionID, * params):
        # Delegate to the graphical object when one is attached.
        if self.graphObject_:
            return self.graphObject_.postAction(actionID, params)
        else: return None

    def QOCA(self, params):
        """
        QOCA Constraint Template
        NOTE: DO NOT select a POST/PRE action trigger
        Constraints will be added/removed in a logical manner by other mechanisms.
        """
        # Everything below the early return is template code kept by the
        # generator for users who enable QOCA constraints.
        return # <---- Remove this to use QOCA
        """ Get the high level constraint helper and solver """
        from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
        oc = OffsetConstraints(self.parent.qocaSolver)
        """
        Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
        For more types of constraints
        """
        oc.fixedWidth(self.graphObject_, self.graphObject_.sizeX)
        oc.fixedHeight(self.graphObject_, self.graphObject_.sizeY)

    def autoIncrLabel(self, params):
        #===============================================================================
        # Auto increment the label
        #===============================================================================
        # If there is already one, ignore
        if not self.MT_label__.isNone(): return
        # Get the maximum label of all MT_pre__ elements
        label = 0
        for nt in self.parent.ASGroot.listNodes:
            if nt.startswith('MT_pre__'):
                for node in self.parent.ASGroot.listNodes[nt]:
                    currLabel = 0
                    try:
                        currLabel = int(node.MT_label__.getValue())
                    except:
                        pass
                    if currLabel > label:
                        label = currLabel
        # The label of this instance will be the max label + 1
        self.MT_label__.setValue(str(label + 1))
| mit |
cbun/assembly | lib/assembly/plugins/kiki.py | 1 | 1233 | import glob
import logging
import os
import subprocess
from plugins import BaseAssembler
from yapsy.IPlugin import IPlugin
class KikiAssembler(BaseAssembler, IPlugin):
    """Yapsy plugin wrapping the Kiki assembler: runs the binary on the
    read files and converts its tab-separated contig output to FastA."""
    new_version = True

    def run(self, reads=None):
        """Run Kiki on self.data.readfiles and return a dict with the
        FastA contig files produced ({'contigs': [paths]})."""
        ### Run Kiki Assembler
        self.arast_popen([self.executable, '-k', self.k, '-i'] +
                         self.data.readfiles + ['-o', self.outpath + '/kiki'])
        ### Find Contig Files
        contigs = glob.glob(self.outpath + '/*.contig')
        contigs_renamed = [contig + '.fa' for contig in contigs]
        ### Convert to standard FastA
        for src, dst in zip(contigs, contigs_renamed):
            self.tab_to_fasta(src, dst, self.contig_threshold)
        return {'contigs': contigs_renamed}

    def tab_to_fasta(self, tabbed_file, outfile, threshold):
        """Convert Kiki's tab-separated contig format to FastA.

        tabbed_file: path to the Kiki .contig file (one record per line).
        outfile:     path of the FastA file to write.
        threshold:   minimum contig length (field 2) to keep; records
                     below it are dropped.
        """
        prefixes = ['>_', ' len_', ' cov_', ' stdev_', ' GC_', ' seed_', '\n']
        # Hoist the conversion out of the loop; threshold may arrive as str.
        min_len = int(threshold)
        # FIX: use context managers so both files are closed even if a
        # malformed line raises (the originals leaked on exceptions).
        with open(tabbed_file, 'r') as tabbed, open(outfile, 'w') as fasta:
            for line in tabbed:
                fields = line.split('\t')
                if int(fields[1]) >= min_len:
                    # Pair each field with its FastA header prefix; zip
                    # stops at the shorter list, tolerating extra fields.
                    for prefix, field in zip(prefixes, fields):
                        fasta.write(prefix + field)
| mit |
davzhang/helix-python-binding | org/apache/helix/agent/HelixAgentMain.py | 1 | 5832 | # package org.apache.helix.agent
#from org.apache.helix.agent import *
#from java.util import Arrays
#from org.apache.commons.cli import CommandLine
#from org.apache.commons.cli import CommandLineParser
#from org.apache.commons.cli import GnuParser
#from org.apache.commons.cli import HelpFormatter
#from org.apache.commons.cli import Option
#from org.apache.commons.cli import OptionBuilder
#from org.apache.commons.cli import Options
#from org.apache.commons.cli import ParseException
#from org.apache.helix.HelixManager import HelixManager
#from org.apache.helix import InstanceType
#from org.apache.helix.manager.zk import ZKHelixManager
#from org.apache.helix.participant import StateMachineEngine
#from org.apache.log4j import Logger
from optparse import OptionParser
import threading
import sys
from org.apache.helix.HelixManagerFactory import HelixManagerFactory
from org.apache.helix.InstanceType import InstanceType
from org.apache.helix.agent.AgentStateModelFactory import AgentStateModelFactory
from org.apache.helix.util.logger import get_logger
class HelixAgentMain(object):
    """Namespace holding the Helix agent's logger and the command-line
    option names (a port of the Java class's static fields)."""

    # Module logger (Java: private static Logger).
    logger = get_logger(__name__)

    # Command-line option names (Java: final static String constants).
    zkAddr = "zkSvr"
    cluster = "cluster"
    help = "help"
    instanceName = "instanceName"
    stateModel = "stateModel"
def constructCommandLineOptions(argv):
    """Build the OptionParser for the Helix agent command line.

    argv is accepted for signature compatibility with the Java original
    but is not needed to construct the parser.
    """
    parser = OptionParser(usage="usage: %prog [options]")
    parser.add_option("--zkAddr", action="store", dest="zkAddr",
                      default="localhost:2181",
                      help="Provide zookeeper address [default: %default]")
    parser.add_option("--cluster", action="store", dest="cluster",
                      default="CLUSTER",
                      help="cluster name [default: %default]")
    parser.add_option("--instanceName", action="store", dest="instanceName",
                      default="instance",
                      help="Helix agent name [default: %default]")
    # FIX: help text previously read "State model name name".
    parser.add_option("--stateModel", action="store", dest="stateModel",
                      default="MasterSlave",
                      help="State model name [default: %default]")
    return parser


def processCommandLineArgs(argv):
    """Parse the given argument list and return the options object.

    BUGFIX: the argv parameter used to be ignored (parse_args() fell back
    to sys.argv), so callers could not supply their own argument list.
    """
    parser = constructCommandLineOptions(argv)
    (options, args) = parser.parse_args(argv)
    return options
# class HelixAgentShutdownHook(Thread):
#
#
#
# """
#
# Parameters:
# HelixManager manager
# """
# def __init__(self, manager):
# _manager = manager;
#
#
# def run(self):
# """
# Returns void
# @Override
#
#
# """
# LOG.info("HelixAgentShutdownHook invoked. agent: " + _manager.getInstanceName());
# if _manager != None && _manager.isConnected():
# _manager.disconnect();
def main(args):
    """Start a Helix agent participant.

    args: command-line argument list (e.g. sys.argv[1:]); parsed for
    zookeeper address, cluster, instance name and state model.

    Connects a PARTICIPANT HelixManager, registers the agent state-model
    factory, then blocks by joining all other live threads.
    """
    # CommandLine
    options = processCommandLineArgs(args)
    # String
    zkAddress = options.zkAddr
    # String
    clusterName = options.cluster
    # String
    instance = options.instanceName
    # String
    stateModelName = options.stateModel
    # HelixManager
    # manager = ZKHelixManager(clusterName, instance, InstanceType.PARTICIPANT, zkAddress);
    manager = HelixManagerFactory.getZKHelixManager(clusterName, instance, InstanceType.PARTICIPANT, zkAddress)
    # StateMachineEngine
    stateMach = manager.getStateMachineEngine()
    # Register our factory for the requested state model before connecting.
    stateMach.registerStateModelFactory(stateModelName, AgentStateModelFactory())
    # Runtime.getRuntime().addShutdownHook(HelixAgentShutdownHook(manager))
    manager.connect()
    # try:
    #     manager.connect();
    #     Thread.currentThread().join();
    # except Exception, e:
    #     LOG.error(e);
    # final:
    #     if manager != None && manager.isConnected():
    #         manager.disconnect();
    # TODO: join here OK?
    # Keep the process alive by joining every other thread (the Java
    # original joined the current thread instead).
    for thread in threading.enumerate():
        if thread is not threading.currentThread():
            thread.join()
if __name__ == "__main__":
main(sys.argv[1:])
| apache-2.0 |
Tejas-Subramanya/RYU_MEC | ryu/ofproto/nx_match.py | 1 | 43134 | # Copyright (C) 2011-2015 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011, 2012 Isaku Yamahata <yamahata at valinux co jp>
# Copyright (C) 2012 Simon Horman <horms ad verge net au>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import sys
from ryu import exception
from ryu.lib import mac
from ryu.lib import type_desc
from ryu.lib.pack_utils import msg_pack_into
from ryu.ofproto import ether
from ryu.ofproto import ofproto_parser
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import inet
from ryu.ofproto import oxm_fields
import logging
LOG = logging.getLogger('ryu.ofproto.nx_match')
UINT64_MAX = (1 << 64) - 1
UINT32_MAX = (1 << 32) - 1
UINT16_MAX = (1 << 16) - 1
# FWW_* are "flow wildcard" bits kept in FlowWildcards.wildcards; a set
# bit means the field is wildcarded (not matched).  The first group maps
# to OpenFlow 1.0 OFPFW_* wildcards.
FWW_IN_PORT = 1 << 0
FWW_DL_TYPE = 1 << 4
FWW_NW_PROTO = 1 << 5
# No corresponding OFPFW_* bits
FWW_NW_DSCP = 1 << 1
FWW_NW_ECN = 1 << 2
FWW_ARP_SHA = 1 << 3
FWW_ARP_THA = 1 << 6
FWW_IPV6_LABEL = 1 << 7
FWW_NW_TTL = 1 << 8
FWW_ALL = (1 << 13) - 1
# IP fragment match bits (NXM_NX_IP_FRAG).
FLOW_NW_FRAG_ANY = 1 << 0
FLOW_NW_FRAG_LATER = 1 << 1
FLOW_NW_FRAG_MASK = FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER
# The IPv4 TOS byte holds ECN in the low 2 bits, DSCP in the high 6.
IP_ECN_MASK = 0x03
IP_DSCP_MASK = 0xfc
# struct pack formats for the individual match-field payload sizes.
MF_PACK_STRING_BE64 = '!Q'
MF_PACK_STRING_BE32 = '!I'
MF_PACK_STRING_BE16 = '!H'
MF_PACK_STRING_8 = '!B'
MF_PACK_STRING_MAC = '!6s'
MF_PACK_STRING_IPV6 = '!8H'
# NXM header -> "make" factory, populated by the @_register_make decorator.
_MF_FIELDS = {}
FLOW_N_REGS = 8  # ovs 1.5
class Flow(ofproto_parser.StringifyMixin):
    """Concrete match values of a rule.

    Every field starts out as "match anything" (zero / don't-care MAC /
    empty list) and is narrowed through the ``ClsRule.set_*`` helpers;
    the companion :class:`FlowWildcards` records which fields (and which
    bits of them) are actually significant.
    """
    def __init__(self):
        # L1/L2
        self.in_port = 0
        self.dl_vlan = 0
        self.dl_vlan_pcp = 0
        self.dl_src = mac.DONTCARE
        self.dl_dst = mac.DONTCARE
        self.dl_type = 0
        # L4 ports; also reused for ICMP type/code (see set_icmp_type).
        self.tp_dst = 0
        self.tp_src = 0
        self.nw_tos = 0
        self.vlan_tci = 0
        self.nw_ttl = 0
        self.nw_proto = 0
        # ARP hardware/protocol addresses
        self.arp_sha = 0
        self.arp_tha = 0
        self.nw_src = 0
        self.nw_dst = 0
        self.tun_id = 0
        self.arp_spa = 0
        self.arp_tpa = 0
        # IPv6 addresses are kept as lists of 16-bit words (8 entries).
        self.ipv6_src = []
        self.ipv6_dst = []
        self.nd_target = []
        self.nw_frag = 0
        # Nicira packet registers reg0..reg7
        self.regs = [0] * FLOW_N_REGS
        self.ipv6_label = 0
        self.pkt_mark = 0
        self.tcp_flags = 0
class FlowWildcards(ofproto_parser.StringifyMixin):
    """Per-field masks and wildcard bits accompanying a :class:`Flow`.

    A zero/empty mask means the corresponding field is not matched;
    ``wildcards`` holds the FWW_* / OFPFW_* bit set (set bit == field
    wildcarded).
    """
    def __init__(self):
        self.dl_src_mask = 0
        self.dl_dst_mask = 0
        self.tp_src_mask = 0
        self.tp_dst_mask = 0
        self.nw_src_mask = 0
        self.nw_dst_mask = 0
        self.tun_id_mask = 0
        self.arp_spa_mask = 0
        self.arp_tpa_mask = 0
        self.vlan_tci_mask = 0
        # IPv6 masks mirror Flow's list-of-16-bit-words representation.
        self.ipv6_src_mask = []
        self.ipv6_dst_mask = []
        self.nd_target_mask = []
        self.nw_frag_mask = 0
        # regs_bits: bitmap of registers in use; regs_mask: per-reg masks.
        self.regs_bits = 0
        self.regs_mask = [0] * FLOW_N_REGS
        self.wildcards = ofproto_v1_0.OFPFW_ALL
        self.pkt_mark_mask = 0
        self.tcp_flags_mask = 0
class ClsRule(ofproto_parser.StringifyMixin):
    """describe a matching rule for OF 1.0 OFPMatch (and NX).

    Keyword arguments are dispatched to the corresponding ``set_<name>``
    method; ``reg<N>=value`` keywords are routed to ``set_reg(N, value)``.
    Match values live in ``self.flow`` and the significant-field/mask
    bookkeeping in ``self.wc``.
    """
    def __init__(self, **kwargs):
        self.wc = FlowWildcards()
        self.flow = Flow()
        for key, value in kwargs.items():
            if key[:3] == 'reg':
                # 'reg<idx>' keywords carry the register index in the name.
                register = int(key[3:] or -1)
                self.set_reg(register, value)
                continue
            setter = getattr(self, 'set_' + key, None)
            if not setter:
                LOG.error('Invalid kwarg specified to ClsRule (%s)', key)
                continue
            # Tuple/list values are spread as positional args (value, mask).
            if not isinstance(value, (tuple, list)):
                value = (value, )
            setter(*value)
    def set_in_port(self, port):
        self.wc.wildcards &= ~FWW_IN_PORT
        self.flow.in_port = port
    def set_dl_vlan(self, dl_vlan):
        self.wc.wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN
        self.flow.dl_vlan = dl_vlan
    def set_dl_vlan_pcp(self, dl_vlan_pcp):
        self.wc.wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN_PCP
        self.flow.dl_vlan_pcp = dl_vlan_pcp
    def set_dl_dst(self, dl_dst):
        self.flow.dl_dst = dl_dst
    def set_dl_dst_masked(self, dl_dst, mask):
        self.wc.dl_dst_mask = mask
        # bit-wise and of the corresponding elements of dl_dst and mask
        self.flow.dl_dst = mac.haddr_bitand(dl_dst, mask)
    def set_dl_src(self, dl_src):
        self.flow.dl_src = dl_src
    def set_dl_src_masked(self, dl_src, mask):
        self.wc.dl_src_mask = mask
        self.flow.dl_src = mac.haddr_bitand(dl_src, mask)
    def set_dl_type(self, dl_type):
        self.wc.wildcards &= ~FWW_DL_TYPE
        self.flow.dl_type = dl_type
    def set_dl_tci(self, tci):
        self.set_dl_tci_masked(tci, UINT16_MAX)
    def set_dl_tci_masked(self, tci, mask):
        self.wc.vlan_tci_mask = mask
        self.flow.vlan_tci = tci
    def set_tp_src(self, tp_src):
        self.set_tp_src_masked(tp_src, UINT16_MAX)
    def set_tp_src_masked(self, tp_src, mask):
        self.wc.tp_src_mask = mask
        self.flow.tp_src = tp_src & mask
    def set_tp_dst(self, tp_dst):
        self.set_tp_dst_masked(tp_dst, UINT16_MAX)
    def set_tp_dst_masked(self, tp_dst, mask):
        self.wc.tp_dst_mask = mask
        self.flow.tp_dst = tp_dst & mask
    def set_nw_proto(self, nw_proto):
        self.wc.wildcards &= ~FWW_NW_PROTO
        self.flow.nw_proto = nw_proto
    def set_nw_src(self, nw_src):
        self.set_nw_src_masked(nw_src, UINT32_MAX)
    def set_nw_src_masked(self, nw_src, mask):
        self.flow.nw_src = nw_src
        self.wc.nw_src_mask = mask
    def set_nw_dst(self, nw_dst):
        self.set_nw_dst_masked(nw_dst, UINT32_MAX)
    def set_nw_dst_masked(self, nw_dst, mask):
        self.flow.nw_dst = nw_dst
        self.wc.nw_dst_mask = mask
    def set_nw_dscp(self, nw_dscp):
        # DSCP shares the TOS byte with ECN; only touch the DSCP bits.
        self.wc.wildcards &= ~FWW_NW_DSCP
        self.flow.nw_tos &= ~IP_DSCP_MASK
        self.flow.nw_tos |= nw_dscp & IP_DSCP_MASK
    def set_icmp_type(self, icmp_type):
        # ICMP type/code reuse the L4 port fields.
        self.set_tp_src(icmp_type)
    def set_icmp_code(self, icmp_code):
        self.set_tp_dst(icmp_code)
    def set_tun_id(self, tun_id):
        self.set_tun_id_masked(tun_id, UINT64_MAX)
    def set_tun_id_masked(self, tun_id, mask):
        self.wc.tun_id_mask = mask
        self.flow.tun_id = tun_id & mask
    def set_nw_ecn(self, nw_ecn):
        # ECN shares the TOS byte with DSCP; only touch the ECN bits.
        self.wc.wildcards &= ~FWW_NW_ECN
        self.flow.nw_tos &= ~IP_ECN_MASK
        self.flow.nw_tos |= nw_ecn & IP_ECN_MASK
    def set_nw_ttl(self, nw_ttl):
        self.wc.wildcards &= ~FWW_NW_TTL
        self.flow.nw_ttl = nw_ttl
    def set_nw_frag(self, nw_frag):
        self.wc.nw_frag_mask |= FLOW_NW_FRAG_MASK
        self.flow.nw_frag = nw_frag
    def set_nw_frag_masked(self, nw_frag, mask):
        self.wc.nw_frag_mask = mask
        self.flow.nw_frag = nw_frag & mask
    def set_arp_spa(self, spa):
        self.set_arp_spa_masked(spa, UINT32_MAX)
    def set_arp_spa_masked(self, spa, mask):
        self.flow.arp_spa = spa
        self.wc.arp_spa_mask = mask
    def set_arp_tpa(self, tpa):
        self.set_arp_tpa_masked(tpa, UINT32_MAX)
    def set_arp_tpa_masked(self, tpa, mask):
        self.flow.arp_tpa = tpa
        self.wc.arp_tpa_mask = mask
    def set_arp_sha(self, sha):
        self.wc.wildcards &= ~FWW_ARP_SHA
        self.flow.arp_sha = sha
    def set_arp_tha(self, tha):
        self.wc.wildcards &= ~FWW_ARP_THA
        self.flow.arp_tha = tha
    def set_icmpv6_type(self, icmp_type):
        self.set_tp_src(icmp_type)
    def set_icmpv6_code(self, icmp_code):
        self.set_tp_dst(icmp_code)
    def set_ipv6_label(self, label):
        self.wc.wildcards &= ~FWW_IPV6_LABEL
        self.flow.ipv6_label = label
    def set_ipv6_src_masked(self, src, mask):
        self.wc.ipv6_src_mask = mask
        self.flow.ipv6_src = [x & y for (x, y) in zip(src, mask)]
    def set_ipv6_src(self, src):
        self.flow.ipv6_src = src
    def set_ipv6_dst_masked(self, dst, mask):
        self.wc.ipv6_dst_mask = mask
        self.flow.ipv6_dst = [x & y for (x, y) in zip(dst, mask)]
    def set_ipv6_dst(self, dst):
        self.flow.ipv6_dst = dst
    def set_nd_target_masked(self, target, mask):
        self.wc.nd_target_mask = mask
        self.flow.nd_target = [x & y for (x, y) in
                               zip(target, mask)]
    def set_nd_target(self, target):
        self.flow.nd_target = target
    def set_reg(self, reg_idx, value):
        # mask 0 here means "exact match" (see MFRegister.put).
        self.set_reg_masked(reg_idx, value, 0)
    def set_reg_masked(self, reg_idx, value, mask):
        self.wc.regs_mask[reg_idx] = mask
        self.flow.regs[reg_idx] = value
        self.wc.regs_bits |= (1 << reg_idx)
    def set_pkt_mark_masked(self, pkt_mark, mask):
        self.flow.pkt_mark = pkt_mark
        self.wc.pkt_mark_mask = mask
    def set_tcp_flags(self, tcp_flags, mask):
        self.flow.tcp_flags = tcp_flags
        self.wc.tcp_flags_mask = mask
    def flow_format(self):
        """Return NXFF_NXM if any NXM-only feature is in use, otherwise
        NXFF_OPENFLOW10 (plain OF 1.0 match)."""
        # Tunnel ID is only supported by NXM
        if self.wc.tun_id_mask != 0:
            return ofproto_v1_0.NXFF_NXM
        # Masking DL_DST is only supported by NXM
        if self.wc.dl_dst_mask:
            return ofproto_v1_0.NXFF_NXM
        # Masking DL_SRC is only supported by NXM
        if self.wc.dl_src_mask:
            return ofproto_v1_0.NXFF_NXM
        # ECN is only supported by NXM
        if not self.wc.wildcards & FWW_NW_ECN:
            return ofproto_v1_0.NXFF_NXM
        if self.wc.regs_bits > 0:
            return ofproto_v1_0.NXFF_NXM
        if self.flow.tcp_flags > 0:
            return ofproto_v1_0.NXFF_NXM
        return ofproto_v1_0.NXFF_OPENFLOW10
    def match_tuple(self):
        """return a tuple which can be used as *args for
        ofproto_v1_0_parser.OFPMatch.__init__().
        see Datapath.send_flow_mod.
        """
        assert self.flow_format() == ofproto_v1_0.NXFF_OPENFLOW10
        wildcards = ofproto_v1_0.OFPFW_ALL
        if not self.wc.wildcards & FWW_IN_PORT:
            wildcards &= ~ofproto_v1_0.OFPFW_IN_PORT
        if self.flow.dl_src != mac.DONTCARE:
            wildcards &= ~ofproto_v1_0.OFPFW_DL_SRC
        if self.flow.dl_dst != mac.DONTCARE:
            wildcards &= ~ofproto_v1_0.OFPFW_DL_DST
        if not self.wc.wildcards & FWW_DL_TYPE:
            wildcards &= ~ofproto_v1_0.OFPFW_DL_TYPE
        if self.flow.dl_vlan != 0:
            wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN
        if self.flow.dl_vlan_pcp != 0:
            wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN_PCP
        if self.flow.nw_tos != 0:
            wildcards &= ~ofproto_v1_0.OFPFW_NW_TOS
        if self.flow.nw_proto != 0:
            wildcards &= ~ofproto_v1_0.OFPFW_NW_PROTO
        # OF1.0 only supports prefix (CIDR) masks; "01" in the binary
        # representation would indicate a non-contiguous mask.
        if self.wc.nw_src_mask != 0 and "01" not in bin(self.wc.nw_src_mask):
            wildcards &= ~ofproto_v1_0.OFPFW_NW_SRC_MASK
            maskbits = (bin(self.wc.nw_src_mask).count("0") - 1)
            wildcards |= (maskbits << ofproto_v1_0.OFPFW_NW_SRC_SHIFT)
        if self.wc.nw_dst_mask != 0 and "01" not in bin(self.wc.nw_dst_mask):
            wildcards &= ~ofproto_v1_0.OFPFW_NW_DST_MASK
            maskbits = (bin(self.wc.nw_dst_mask).count("0") - 1)
            wildcards |= (maskbits << ofproto_v1_0.OFPFW_NW_DST_SHIFT)
        if self.flow.tp_src != 0:
            wildcards &= ~ofproto_v1_0.OFPFW_TP_SRC
        if self.flow.tp_dst != 0:
            wildcards &= ~ofproto_v1_0.OFPFW_TP_DST
        return (wildcards, self.flow.in_port, self.flow.dl_src,
                self.flow.dl_dst, self.flow.dl_vlan, self.flow.dl_vlan_pcp,
                self.flow.dl_type, self.flow.nw_tos & IP_DSCP_MASK,
                self.flow.nw_proto, self.flow.nw_src, self.flow.nw_dst,
                self.flow.tp_src, self.flow.tp_dst)
def _set_nxm_headers(nxm_headers):
'''Annotate corresponding NXM header'''
def _set_nxm_headers_dec(self):
self.nxm_headers = nxm_headers
return self
return _set_nxm_headers_dec
def _register_make(cls):
    '''class decorator to Register mf make

    Registers ``cls.make`` in _MF_FIELDS for every NXM header the class
    was annotated with via @_set_nxm_headers.  Each header may be
    claimed by exactly one field class.
    '''
    assert cls.nxm_headers is not None
    # Bug fix: the original check was ``cls.nxm_headers is not []`` -- an
    # identity comparison against a fresh list literal, which is always
    # true.  Assert non-emptiness properly instead.
    assert len(cls.nxm_headers) > 0
    for nxm_header in cls.nxm_headers:
        assert nxm_header not in _MF_FIELDS
        _MF_FIELDS[nxm_header] = cls.make
    return cls
def mf_from_nxm_header(nxm_header):
    """Return a match-field instance for *nxm_header*, or None if no
    field class registered that header.

    Single dict lookup instead of the original membership test followed
    by a redundant ``get`` and assert.
    """
    make = _MF_FIELDS.get(nxm_header)
    if make is None:
        return None
    return make(nxm_header)
class MFField(object):
    """Base class for NXM match-field (de)serializers.

    Subclasses map NXM headers to themselves via
    ``register_field_header`` (used by ``parser``) and provide
    ``make``/``put`` for serialization of a ClsRule.
    """
    # NXM header -> field class, for parsing.
    _FIELDS_HEADERS = {}

    @staticmethod
    def register_field_header(headers):
        """Class decorator: map each NXM header in *headers* to the class."""
        def _register_field_header(cls):
            for header in headers:
                MFField._FIELDS_HEADERS[header] = cls
            return cls
        return _register_field_header

    def __init__(self, nxm_header, pack_str):
        self.nxm_header = nxm_header
        self.pack_str = pack_str
        self.n_bytes = struct.calcsize(pack_str)
        self.n_bits = self.n_bytes * 8

    @classmethod
    def parser(cls, buf, offset):
        """Parse one NXM TLV at *offset*.

        Returns the parsed field with the total TLV length (header +
        payload) stored in ``field.length``.

        Raises OFPMalformedMessage for an unregistered header.  (Bug
        fix: the original used a bare ``raise`` with no active
        exception, which raises RuntimeError instead.)
        """
        (header,) = struct.unpack_from('!I', buf, offset)
        cls_ = MFField._FIELDS_HEADERS.get(header)
        if cls_:
            field = cls_.field_parser(header, buf, offset)
        else:
            raise exception.OFPMalformedMessage
        # Low byte of the header is the payload length; add the 4-byte
        # header itself.
        field.length = (header & 0xff) + 4
        return field

    @classmethod
    def field_parser(cls, header, buf, offset):
        # Bit 8 of the header indicates a value+mask payload.
        hasmask = (header >> 8) & 1
        mask = None
        if hasmask:
            # Doubled format: value followed by mask of the same size.
            pack_str = '!' + cls.pack_str[1:] * 2
            (value, mask) = struct.unpack_from(pack_str, buf,
                                               offset + 4)
        else:
            (value,) = struct.unpack_from(cls.pack_str, buf,
                                          offset + 4)
        return cls(header, value, mask)

    def _put(self, buf, offset, value):
        # Serialize a single value; returns the number of bytes written.
        msg_pack_into(self.pack_str, buf, offset, value)
        return self.n_bytes

    def putw(self, buf, offset, value, mask):
        # Serialize value followed by mask.
        len_ = self._put(buf, offset, value)
        return len_ + self._put(buf, offset + len_, mask)

    def _is_all_ones(self, value):
        return value == (1 << self.n_bits) - 1

    def putm(self, buf, offset, value, mask):
        # Masked put: nothing for an all-zero mask, plain value for an
        # all-ones (exact) mask, value+mask otherwise.
        if mask == 0:
            return 0
        elif self._is_all_ones(mask):
            return self._put(buf, offset, value)
        else:
            return self.putw(buf, offset, value, mask)

    def _putv6(self, buf, offset, value):
        # IPv6-style values are sequences of words, spread into the pack.
        msg_pack_into(self.pack_str, buf, offset, *value)
        return self.n_bytes

    def putv6(self, buf, offset, value, mask):
        len_ = self._putv6(buf, offset, value)
        if len(mask):
            return len_ + self._putv6(buf, offset + len_, mask)
        return len_
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IN_PORT])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IN_PORT])
class MFInPort(MFField):
    """Match field: OpenFlow ingress port (16-bit big-endian, exact)."""
    pack_str = MF_PACK_STRING_BE16
    def __init__(self, header, value, mask=None):
        super(MFInPort, self).__init__(header, MFInPort.pack_str)
        self.value = value
    @classmethod
    def make(cls, header):
        return cls(header, MFInPort.pack_str)
    def put(self, buf, offset, rule):
        return self._put(buf, offset, rule.flow.in_port)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_DST, ofproto_v1_0.NXM_OF_ETH_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_DST,
                                ofproto_v1_0.NXM_OF_ETH_DST_W])
class MFEthDst(MFField):
    """Match field: Ethernet destination MAC (6 bytes, maskable)."""
    pack_str = MF_PACK_STRING_MAC
    def __init__(self, header, value, mask=None):
        super(MFEthDst, self).__init__(header, MFEthDst.pack_str)
        self.value = value
    @classmethod
    def make(cls, header):
        return cls(header, MFEthDst.pack_str)
    def put(self, buf, offset, rule):
        # Emit value+mask only when the rule carries a dl_dst mask.
        if rule.wc.dl_dst_mask:
            return self.putw(buf, offset, rule.flow.dl_dst,
                             rule.wc.dl_dst_mask)
        else:
            return self._put(buf, offset, rule.flow.dl_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_SRC, ofproto_v1_0.NXM_OF_ETH_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_SRC,
                                ofproto_v1_0.NXM_OF_ETH_SRC_W])
class MFEthSrc(MFField):
    """Match field: Ethernet source MAC (6 bytes, maskable)."""
    pack_str = MF_PACK_STRING_MAC
    def __init__(self, header, value, mask=None):
        super(MFEthSrc, self).__init__(header, MFEthSrc.pack_str)
        self.value = value
    @classmethod
    def make(cls, header):
        return cls(header, MFEthSrc.pack_str)
    def put(self, buf, offset, rule):
        # Emit value+mask only when the rule carries a dl_src mask.
        if rule.wc.dl_src_mask:
            return self.putw(buf, offset, rule.flow.dl_src,
                             rule.wc.dl_src_mask)
        else:
            return self._put(buf, offset, rule.flow.dl_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_TYPE])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_TYPE])
class MFEthType(MFField):
    """Match field: Ethernet type (16-bit big-endian, exact)."""
    pack_str = MF_PACK_STRING_BE16
    def __init__(self, header, value, mask=None):
        super(MFEthType, self).__init__(header, MFEthType.pack_str)
        self.value = value
    @classmethod
    def make(cls, header):
        return cls(header, MFEthType.pack_str)
    def put(self, buf, offset, rule):
        return self._put(buf, offset, rule.flow.dl_type)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_VLAN_TCI,
                   ofproto_v1_0.NXM_OF_VLAN_TCI_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_VLAN_TCI,
                                ofproto_v1_0.NXM_OF_VLAN_TCI_W])
class MFVlan(MFField):
    """Match field: 802.1Q VLAN TCI (16-bit, maskable)."""
    pack_str = MF_PACK_STRING_BE16
    def __init__(self, header, value, mask=None):
        super(MFVlan, self).__init__(header, MFVlan.pack_str)
        self.value = value
    @classmethod
    def make(cls, header):
        return cls(header, MFVlan.pack_str)
    def put(self, buf, offset, rule):
        return self.putm(buf, offset, rule.flow.vlan_tci,
                         rule.wc.vlan_tci_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_TOS])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_TOS])
class MFIPDSCP(MFField):
    """Match field: IP DSCP (high 6 bits of the TOS byte, exact)."""
    pack_str = MF_PACK_STRING_8
    def __init__(self, header, value, mask=None):
        super(MFIPDSCP, self).__init__(header, MFIPDSCP.pack_str)
        self.value = value
    @classmethod
    def make(cls, header):
        return cls(header, MFIPDSCP.pack_str)
    def put(self, buf, offset, rule):
        # Strip the ECN bits; only the DSCP portion of TOS is emitted.
        return self._put(buf, offset,
                         rule.flow.nw_tos & IP_DSCP_MASK)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_TUN_ID,
                   ofproto_v1_0.NXM_NX_TUN_ID_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_TUN_ID,
                                ofproto_v1_0.NXM_NX_TUN_ID_W])
class MFTunId(MFField):
    """Match field: Nicira tunnel ID (64-bit, maskable)."""
    pack_str = MF_PACK_STRING_BE64
    def __init__(self, header, value, mask=None):
        super(MFTunId, self).__init__(header, MFTunId.pack_str)
        self.value = value
    @classmethod
    def make(cls, header):
        return cls(header, MFTunId.pack_str)
    def put(self, buf, offset, rule):
        return self.putm(buf, offset, rule.flow.tun_id, rule.wc.tun_id_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_SRC, ofproto_v1_0.NXM_OF_IP_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_SRC,
                                ofproto_v1_0.NXM_OF_IP_SRC_W])
class MFIPSrc(MFField):
    """Match field: IPv4 source address (32-bit, maskable)."""
    pack_str = MF_PACK_STRING_BE32
    def __init__(self, header, value, mask=None):
        super(MFIPSrc, self).__init__(header, MFIPSrc.pack_str)
        self.value = value
        self.mask = mask
    @classmethod
    def make(cls, header):
        return cls(header, MFIPSrc.pack_str)
    def put(self, buf, offset, rule):
        return self.putm(buf, offset, rule.flow.nw_src, rule.wc.nw_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_DST, ofproto_v1_0.NXM_OF_IP_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_DST,
                                ofproto_v1_0.NXM_OF_IP_DST_W])
class MFIPDst(MFField):
    """Match field: IPv4 destination address (32-bit, maskable)."""
    pack_str = MF_PACK_STRING_BE32
    def __init__(self, header, value, mask=None):
        super(MFIPDst, self).__init__(header, MFIPDst.pack_str)
        self.value = value
        self.mask = mask
    @classmethod
    def make(cls, header):
        return cls(header, MFIPDst.pack_str)
    def put(self, buf, offset, rule):
        return self.putm(buf, offset, rule.flow.nw_dst, rule.wc.nw_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_ECN])
class MFIPECN(MFField):
    """Match field: IP ECN (low 2 bits of the TOS byte, exact)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_8)
    def put(self, buf, offset, rule):
        # Strip the DSCP bits; only the ECN portion of TOS is emitted.
        return self._put(buf, offset,
                         rule.flow.nw_tos & IP_ECN_MASK)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_TTL])
class MFIPTTL(MFField):
    """Match field: IP TTL (8-bit, exact)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_8)
    def put(self, buf, offset, rule):
        return self._put(buf, offset, rule.flow.nw_ttl)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_PROTO])
class MFIPProto(MFField):
    """Match field: IP protocol number (8-bit, exact)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_8)
    def put(self, buf, offset, rule):
        return self._put(buf, offset, rule.flow.nw_proto)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_TCP_SRC, ofproto_v1_0.NXM_OF_TCP_SRC_W,
                   ofproto_v1_0.NXM_OF_UDP_SRC, ofproto_v1_0.NXM_OF_UDP_SRC_W])
class MFTPSRC(MFField):
    """Match field: TCP/UDP source port (16-bit, maskable)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_BE16)
    def put(self, buf, offset, rule):
        return self.putm(buf, offset, rule.flow.tp_src, rule.wc.tp_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_TCP_DST, ofproto_v1_0.NXM_OF_TCP_DST_W,
                   ofproto_v1_0.NXM_OF_UDP_DST, ofproto_v1_0.NXM_OF_UDP_DST_W])
class MFTPDST(MFField):
    """Match field: TCP/UDP destination port (16-bit, maskable)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_BE16)
    def put(self, buf, offset, rule):
        return self.putm(buf, offset, rule.flow.tp_dst, rule.wc.tp_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ARP_SPA, ofproto_v1_0.NXM_OF_ARP_SPA_W])
class MFArpSpa(MFField):
    """Match field: ARP sender protocol address (32-bit, maskable)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_BE32)
    def put(self, buf, offset, rule):
        return self.putm(buf, offset, rule.flow.arp_spa, rule.wc.arp_spa_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ARP_TPA, ofproto_v1_0.NXM_OF_ARP_TPA_W])
class MFArpTpa(MFField):
    """Match field: ARP target protocol address (32-bit, maskable)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_BE32)
    def put(self, buf, offset, rule):
        return self.putm(buf, offset, rule.flow.arp_tpa, rule.wc.arp_tpa_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ARP_SHA])
class MFArpSha(MFField):
    """Match field: ARP sender hardware address (MAC, exact)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_MAC)
    def put(self, buf, offset, rule):
        return self._put(buf, offset, rule.flow.arp_sha)
class MFIPV6(object):
    """Mixin providing 128-bit (8 x 16-bit words) parsing for IPv6
    address fields; values are returned as lists of ints."""
    pack_str = MF_PACK_STRING_IPV6
    @classmethod
    def field_parser(cls, header, buf, offset):
        # Bit 8 of the header indicates a value+mask payload.
        hasmask = (header >> 8) & 1
        if hasmask:
            pack_string = '!' + cls.pack_str[1:] * 2
            value = struct.unpack_from(pack_string, buf, offset + 4)
            # First 8 words are the value, the remaining 8 the mask.
            return cls(header, list(value[:8]), list(value[8:]))
        else:
            value = struct.unpack_from(cls.pack_str, buf, offset + 4)
            return cls(header, list(value))
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_SRC,
                   ofproto_v1_0.NXM_NX_IPV6_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_IPV6_SRC,
                                ofproto_v1_0.NXM_NX_IPV6_SRC_W])
class MFIPV6Src(MFIPV6, MFField):
    """Match field: IPv6 source address (128-bit, maskable)."""
    def __init__(self, header, value, mask=None):
        super(MFIPV6Src, self).__init__(header, MFIPV6Src.pack_str)
        self.value = value
        self.mask = mask
    @classmethod
    def make(cls, header):
        return cls(header, cls.pack_str)
    def put(self, buf, offset, rule):
        return self.putv6(buf, offset,
                          rule.flow.ipv6_src,
                          rule.wc.ipv6_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_DST,
                   ofproto_v1_0.NXM_NX_IPV6_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_IPV6_DST,
                                ofproto_v1_0.NXM_NX_IPV6_DST_W])
class MFIPV6Dst(MFIPV6, MFField):
    """Match field: IPv6 destination address (128-bit, maskable)."""
    def __init__(self, header, value, mask=None):
        super(MFIPV6Dst, self).__init__(header, MFIPV6Dst.pack_str)
        self.value = value
        self.mask = mask
    @classmethod
    def make(cls, header):
        return cls(header, cls.pack_str)
    def put(self, buf, offset, rule):
        return self.putv6(buf, offset,
                          rule.flow.ipv6_dst,
                          rule.wc.ipv6_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ND_TARGET,
                   ofproto_v1_0.NXM_NX_ND_TARGET_W])
class MFNdTarget(MFField):
    """Match field: IPv6 neighbor-discovery target (maskable)."""
    @classmethod
    def make(cls, header):
        # Packed as four 32-bit words (128 bits total).
        return cls(header, '!4I')
    def put(self, buf, offset, rule):
        return self.putv6(buf, offset,
                          rule.flow.nd_target,
                          rule.wc.nd_target_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_FRAG,
                   ofproto_v1_0.NXM_NX_IP_FRAG_W])
class MFIpFrag(MFField):
    """Match field: IP fragmentation state (FLOW_NW_FRAG_* bits)."""
    @classmethod
    def make(cls, header):
        return cls(header, '!B')
    def put(self, buf, offset, rule):
        # Full frag mask -> exact match; otherwise emit value+mask,
        # clamping the mask to the defined FLOW_NW_FRAG_* bits.
        if rule.wc.nw_frag_mask == FLOW_NW_FRAG_MASK:
            return self._put(buf, offset, rule.flow.nw_frag)
        else:
            return self.putw(buf, offset, rule.flow.nw_frag,
                             rule.wc.nw_frag_mask & FLOW_NW_FRAG_MASK)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ARP_THA])
class MFArpTha(MFField):
    """Match field: ARP target hardware address (MAC, exact)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_MAC)
    def put(self, buf, offset, rule):
        return self._put(buf, offset, rule.flow.arp_tha)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ICMP_TYPE])
class MFICMPType(MFField):
    """Match field: ICMP type (stored in the tp_src slot)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_8)
    def put(self, buf, offset, rule):
        return self._put(buf, offset, rule.flow.tp_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ICMP_CODE])
class MFICMPCode(MFField):
    """Match field: ICMP code (stored in the tp_dst slot)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_8)
    def put(self, buf, offset, rule):
        return self._put(buf, offset, rule.flow.tp_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ICMPV6_TYPE])
class MFICMPV6Type(MFField):
    """Match field: ICMPv6 type (stored in the tp_src slot)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_8)
    def put(self, buf, offset, rule):
        return self._put(buf, offset, rule.flow.tp_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ICMPV6_CODE])
class MFICMPV6Code(MFField):
    """Match field: ICMPv6 code (stored in the tp_dst slot)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_8)
    def put(self, buf, offset, rule):
        return self._put(buf, offset, rule.flow.tp_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_LABEL])
class MFICMPV6Label(MFField):
    """Match field: IPv6 flow label (32-bit, exact)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_BE32)
    def put(self, buf, offset, rule):
        return self._put(buf, offset, rule.flow.ipv6_label)
@_register_make
@_set_nxm_headers([ofproto_v1_0.nxm_nx_reg(i) for i in range(FLOW_N_REGS)]
                  + [ofproto_v1_0.nxm_nx_reg_w(i) for i in range(FLOW_N_REGS)])
class MFRegister(MFField):
    """Match field: Nicira packet registers reg0..reg7 (32-bit)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_BE32)
    def put(self, buf, offset, rule):
        # Find which register index this instance's header refers to.
        for i in range(FLOW_N_REGS):
            if (ofproto_v1_0.nxm_nx_reg(i) == self.nxm_header or
                    ofproto_v1_0.nxm_nx_reg_w(i) == self.nxm_header):
                # Mask 0 (set via set_reg) means exact match on the value.
                if rule.wc.regs_mask[i]:
                    return self.putm(buf, offset, rule.flow.regs[i],
                                     rule.wc.regs_mask[i])
                else:
                    return self._put(buf, offset, rule.flow.regs[i])
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_PKT_MARK,
                   ofproto_v1_0.NXM_NX_PKT_MARK_W])
class MFPktMark(MFField):
    """Match field: packet metadata mark (32-bit, maskable)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_BE32)
    def put(self, buf, offset, rule):
        return self.putm(buf, offset, rule.flow.pkt_mark,
                         rule.wc.pkt_mark_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_TCP_FLAGS,
                   ofproto_v1_0.NXM_NX_TCP_FLAGS_W])
class MFTcpFlags(MFField):
    """Match field: TCP flags (16-bit, maskable)."""
    @classmethod
    def make(cls, header):
        return cls(header, MF_PACK_STRING_BE16)
    def put(self, buf, offset, rule):
        return self.putm(buf, offset, rule.flow.tcp_flags,
                         rule.wc.tcp_flags_mask)
def serialize_nxm_match(rule, buf, offset):
    """Append the NXM representation of *rule* to *buf* at *offset*.

    Each field is emitted only when the rule's wildcards/masks mark it
    as significant; the exact-match header is chosen when the mask is
    all-ones, the _W (masked) header otherwise.  The output is padded
    to a multiple of 8 bytes.

    Returns the match length in bytes, NOT including the trailing pad
    (the pad itself is still written into *buf*).
    """
    old_offset = offset
    if not rule.wc.wildcards & FWW_IN_PORT:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IN_PORT, rule)
    # Ethernet.
    if rule.flow.dl_dst != mac.DONTCARE:
        if rule.wc.dl_dst_mask:
            header = ofproto_v1_0.NXM_OF_ETH_DST_W
        else:
            header = ofproto_v1_0.NXM_OF_ETH_DST
        offset += nxm_put(buf, offset, header, rule)
    if rule.flow.dl_src != mac.DONTCARE:
        if rule.wc.dl_src_mask:
            header = ofproto_v1_0.NXM_OF_ETH_SRC_W
        else:
            header = ofproto_v1_0.NXM_OF_ETH_SRC
        offset += nxm_put(buf, offset, header, rule)
    if not rule.wc.wildcards & FWW_DL_TYPE:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ETH_TYPE, rule)
    # 802.1Q
    if rule.wc.vlan_tci_mask != 0:
        if rule.wc.vlan_tci_mask == UINT16_MAX:
            header = ofproto_v1_0.NXM_OF_VLAN_TCI
        else:
            header = ofproto_v1_0.NXM_OF_VLAN_TCI_W
        offset += nxm_put(buf, offset, header, rule)
    # L3
    if not rule.wc.wildcards & FWW_NW_DSCP:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IP_TOS, rule)
    if not rule.wc.wildcards & FWW_NW_ECN:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IP_ECN, rule)
    if not rule.wc.wildcards & FWW_NW_TTL:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IP_TTL, rule)
    if not rule.wc.wildcards & FWW_NW_PROTO:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IP_PROTO, rule)
    if not rule.wc.wildcards & FWW_NW_PROTO and (rule.flow.nw_proto
                                                 == inet.IPPROTO_ICMP):
        if rule.wc.tp_src_mask != 0:
            offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ICMP_TYPE, rule)
        if rule.wc.tp_dst_mask != 0:
            offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ICMP_CODE, rule)
    # L4 ports: 6 == TCP, 17 == UDP; anything else is not emitted.
    if rule.flow.tp_src != 0:
        if rule.flow.nw_proto == 6:
            if rule.wc.tp_src_mask == UINT16_MAX:
                header = ofproto_v1_0.NXM_OF_TCP_SRC
            else:
                header = ofproto_v1_0.NXM_OF_TCP_SRC_W
        elif rule.flow.nw_proto == 17:
            if rule.wc.tp_src_mask == UINT16_MAX:
                header = ofproto_v1_0.NXM_OF_UDP_SRC
            else:
                header = ofproto_v1_0.NXM_OF_UDP_SRC_W
        else:
            header = 0
        if header != 0:
            offset += nxm_put(buf, offset, header, rule)
    if rule.flow.tp_dst != 0:
        if rule.flow.nw_proto == 6:
            if rule.wc.tp_dst_mask == UINT16_MAX:
                header = ofproto_v1_0.NXM_OF_TCP_DST
            else:
                header = ofproto_v1_0.NXM_OF_TCP_DST_W
        elif rule.flow.nw_proto == 17:
            if rule.wc.tp_dst_mask == UINT16_MAX:
                header = ofproto_v1_0.NXM_OF_UDP_DST
            else:
                header = ofproto_v1_0.NXM_OF_UDP_DST_W
        else:
            header = 0
        if header != 0:
            offset += nxm_put(buf, offset, header, rule)
    if rule.flow.tcp_flags != 0:
        # TCP Flags can only be used if the ethernet type is IPv4 or IPv6
        if rule.flow.dl_type in (ether.ETH_TYPE_IP, ether.ETH_TYPE_IPV6):
            # TCP Flags can only be used if the ip protocol is TCP
            if rule.flow.nw_proto == inet.IPPROTO_TCP:
                if rule.wc.tcp_flags_mask == UINT16_MAX:
                    header = ofproto_v1_0.NXM_NX_TCP_FLAGS
                else:
                    header = ofproto_v1_0.NXM_NX_TCP_FLAGS_W
            else:
                header = 0
        else:
            header = 0
        if header != 0:
            offset += nxm_put(buf, offset, header, rule)
    # IP Source and Destination
    if rule.flow.nw_src != 0:
        if rule.wc.nw_src_mask == UINT32_MAX:
            header = ofproto_v1_0.NXM_OF_IP_SRC
        else:
            header = ofproto_v1_0.NXM_OF_IP_SRC_W
        offset += nxm_put(buf, offset, header, rule)
    if rule.flow.nw_dst != 0:
        if rule.wc.nw_dst_mask == UINT32_MAX:
            header = ofproto_v1_0.NXM_OF_IP_DST
        else:
            header = ofproto_v1_0.NXM_OF_IP_DST_W
        offset += nxm_put(buf, offset, header, rule)
    # IPv6
    if not rule.wc.wildcards & FWW_NW_PROTO and (rule.flow.nw_proto
                                                 == inet.IPPROTO_ICMPV6):
        if rule.wc.tp_src_mask != 0:
            offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ICMPV6_TYPE,
                              rule)
        if rule.wc.tp_dst_mask != 0:
            offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ICMPV6_CODE,
                              rule)
    if not rule.wc.wildcards & FWW_IPV6_LABEL:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IPV6_LABEL, rule)
    if len(rule.flow.ipv6_src):
        if len(rule.wc.ipv6_src_mask):
            header = ofproto_v1_0.NXM_NX_IPV6_SRC_W
        else:
            header = ofproto_v1_0.NXM_NX_IPV6_SRC
        offset += nxm_put(buf, offset, header, rule)
    if len(rule.flow.ipv6_dst):
        if len(rule.wc.ipv6_dst_mask):
            header = ofproto_v1_0.NXM_NX_IPV6_DST_W
        else:
            header = ofproto_v1_0.NXM_NX_IPV6_DST
        offset += nxm_put(buf, offset, header, rule)
    if len(rule.flow.nd_target):
        if len(rule.wc.nd_target_mask):
            header = ofproto_v1_0.NXM_NX_ND_TARGET_W
        else:
            header = ofproto_v1_0.NXM_NX_ND_TARGET
        offset += nxm_put(buf, offset, header, rule)
    # ARP
    if rule.flow.arp_spa != 0:
        if rule.wc.arp_spa_mask == UINT32_MAX:
            header = ofproto_v1_0.NXM_OF_ARP_SPA
        else:
            header = ofproto_v1_0.NXM_OF_ARP_SPA_W
        offset += nxm_put(buf, offset, header, rule)
    if rule.flow.arp_tpa != 0:
        if rule.wc.arp_tpa_mask == UINT32_MAX:
            header = ofproto_v1_0.NXM_OF_ARP_TPA
        else:
            header = ofproto_v1_0.NXM_OF_ARP_TPA_W
        offset += nxm_put(buf, offset, header, rule)
    if not rule.wc.wildcards & FWW_ARP_SHA:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ARP_SHA, rule)
    if not rule.wc.wildcards & FWW_ARP_THA:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ARP_THA, rule)
    if rule.flow.nw_frag:
        if rule.wc.nw_frag_mask == FLOW_NW_FRAG_MASK:
            header = ofproto_v1_0.NXM_NX_IP_FRAG
        else:
            header = ofproto_v1_0.NXM_NX_IP_FRAG_W
        offset += nxm_put(buf, offset, header, rule)
    if rule.flow.pkt_mark != 0:
        if rule.wc.pkt_mark_mask == UINT32_MAX:
            header = ofproto_v1_0.NXM_NX_PKT_MARK
        else:
            header = ofproto_v1_0.NXM_NX_PKT_MARK_W
        offset += nxm_put(buf, offset, header, rule)
    # Tunnel Id
    if rule.wc.tun_id_mask != 0:
        if rule.wc.tun_id_mask == UINT64_MAX:
            header = ofproto_v1_0.NXM_NX_TUN_ID
        else:
            header = ofproto_v1_0.NXM_NX_TUN_ID_W
        offset += nxm_put(buf, offset, header, rule)
    # XXX: Cookie
    for i in range(FLOW_N_REGS):
        if rule.wc.regs_bits & (1 << i):
            if rule.wc.regs_mask[i]:
                header = ofproto_v1_0.nxm_nx_reg_w(i)
            else:
                header = ofproto_v1_0.nxm_nx_reg(i)
            offset += nxm_put(buf, offset, header, rule)
    # Pad
    pad_len = round_up(offset) - offset
    msg_pack_into("%dx" % pad_len, buf, offset)
    # The returned length, the match_len, does not include the pad
    return offset - old_offset
def nxm_put(buf, offset, header, rule):
    # Write one NXM TLV: the 4-byte header followed by the field payload
    # taken from *rule*.  Returns the total number of bytes written.
    nxm = NXMatch(header)
    len_ = nxm.put_header(buf, offset)
    mf = mf_from_nxm_header(nxm.header)
    return len_ + mf.put(buf, offset + len_, rule)
def round_up(length):
    """Round *length* up to the nearest multiple of 8."""
    remainder = length % 8
    if remainder:
        return length + (8 - remainder)
    return length
class NXMatch(object):
    """Accessor for a 32-bit NXM TLV header.

    Layout (MSB to LSB): vendor:16 | field:7 | hasmask:1 | length:8.
    """
    def __init__(self, header):
        self.header = header

    @classmethod
    def parser(cls, buf, offset, match_len):
        """Parse one NXM header from *buf*; raises OFPMalformedMessage
        when *match_len* cannot hold the header or its payload."""
        if match_len < 4:
            raise exception.OFPMalformedMessage
        (header,) = struct.unpack_from(ofproto_v1_0.NXM_HEADER_PACK_STRING,
                                       buf, offset)
        instance = cls(header)
        payload_len = instance.length()
        if payload_len == 0 or match_len < payload_len + 4:
            raise exception.OFPMalformedMessage
        return instance

    def vendor(self):
        # Top 16 bits carry the vendor/class id.
        return self.header >> 16

    def field(self):
        # Bug fix: the 7-bit field id must be extracted with a bit mask
        # (& 0x7f); the previous modulo (% 0x7f) mis-decoded ids such
        # as 127 (127 % 127 == 0).
        return (self.header >> 9) & 0x7f

    def type(self):
        # vendor + field together (23 bits), i.e. the header without the
        # hasmask and length bits.  Bug fix: bit mask, not modulo.
        return (self.header >> 9) & 0x7fffff

    def hasmask(self):
        return (self.header >> 8) & 1

    def length(self):
        # Payload length in bytes (excludes the 4-byte header itself).
        return self.header & 0xff

    def show(self):
        return ('%08x (vendor=%x, field=%x, hasmask=%x len=%x)' %
                (self.header, self.vendor(), self.field(),
                 self.hasmask(), self.length()))

    def put_header(self, buf, offset):
        msg_pack_into(ofproto_v1_0.NXM_HEADER_PACK_STRING,
                      buf, offset, self.header)
        return struct.calcsize(ofproto_v1_0.NXM_HEADER_PACK_STRING)
#
# The following are implementations for OpenFlow 1.2+
#
sys.modules[__name__].__doc__ = """
The API of this class is the same as ``OFPMatch``.
You can define the flow match by the keyword arguments.
The following arguments are available.
================ =============== ==============================================
Argument Value Description
================ =============== ==============================================
eth_dst_nxm MAC address Ethernet destination address.
eth_src_nxm MAC address Ethernet source address.
eth_type_nxm Integer 16bit Ethernet type. Needed to support Nicira
extensions that require the eth_type to
be set. (i.e. tcp_flags_nxm)
ip_proto_nxm Integer 8bit IP protocol. Needed to support Nicira
extensions that require the ip_proto to
be set. (i.e. tcp_flags_nxm)
tunnel_id_nxm Integer 64bit Tunnel identifier.
tun_ipv4_src IPv4 address Tunnel IPv4 source address.
tun_ipv4_dst IPv4 address Tunnel IPv4 destination address.
pkt_mark Integer 32bit Packet metadata mark.
tcp_flags_nxm Integer 16bit TCP Flags. Requires setting fields:
eth_type_nxm = [0x0800 (IP)|0x86dd (IPv6)] and
ip_proto_nxm = 6 (TCP)
conj_id Integer 32bit Conjunction ID used only with
the conjunction action
ct_state Integer 32bit Conntrack state.
ct_zone Integer 16bit Conntrack zone.
ct_mark Integer 32bit Conntrack mark.
ct_label Integer 128bit Conntrack label.
_dp_hash Integer 32bit Flow hash computed in Datapath.
reg<idx> Integer 32bit Packet register.
<idx> is register number 0-7.
================ =============== ==============================================
.. Note::
Setting the TCP flags via the nicira extensions.
This is required when using OVS version < 2.4.
When using the nxm fields, you need to use any nxm prereq
fields as well or you will receive a OFPBMC_BAD_PREREQ error
Example::
# WILL NOT work
flag = tcp.TCP_ACK
match = parser.OFPMatch(
tcp_flags_nxm=(flag, flag),
ip_proto=inet.IPPROTO_TCP,
eth_type=eth_type)
# Works
flag = tcp.TCP_ACK
match = parser.OFPMatch(
tcp_flags_nxm=(flag, flag),
ip_proto_nxm=inet.IPPROTO_TCP,
eth_type_nxm=eth_type)
"""
# NXM/OXM field definitions exposed by this module's match implementation.
# The numeric second argument is the Nicira field id; NiciraExtended0 and
# NiciraExtended1 presumably select the two Nicira vendor classes -- confirm
# against oxm_fields before relying on that distinction.
oxm_types = [
    oxm_fields.NiciraExtended0('eth_dst_nxm', 1, type_desc.MacAddr),
    oxm_fields.NiciraExtended0('eth_src_nxm', 2, type_desc.MacAddr),
    oxm_fields.NiciraExtended0('eth_type_nxm', 3, type_desc.Int2),
    oxm_fields.NiciraExtended0('ip_proto_nxm', 6, type_desc.Int1),
    oxm_fields.NiciraExtended1('tunnel_id_nxm', 16, type_desc.Int8),
    oxm_fields.NiciraExtended1('tun_ipv4_src', 31, type_desc.IPv4Addr),
    oxm_fields.NiciraExtended1('tun_ipv4_dst', 32, type_desc.IPv4Addr),
    oxm_fields.NiciraExtended1('pkt_mark', 33, type_desc.Int4),
    oxm_fields.NiciraExtended1('tcp_flags_nxm', 34, type_desc.Int2),
    oxm_fields.NiciraExtended1('conj_id', 37, type_desc.Int4),
    oxm_fields.NiciraExtended1('ct_state', 105, type_desc.Int4),
    oxm_fields.NiciraExtended1('ct_zone', 106, type_desc.Int2),
    oxm_fields.NiciraExtended1('ct_mark', 107, type_desc.Int4),
    oxm_fields.NiciraExtended1('ct_label', 108, type_desc.Int16),
    # The following definition is merely for testing 64-bit experimenter OXMs.
    # Following Open vSwitch, we use dp_hash for this purpose.
    # Prefix the name with '_' to indicate this is not intended to be used
    # in wild.
    oxm_fields.NiciraExperimenter('_dp_hash', 0, type_desc.Int4),
    # Support for matching/setting NX registers 0-7
    oxm_fields.NiciraExtended1('reg0', 0, type_desc.Int4),
    oxm_fields.NiciraExtended1('reg1', 1, type_desc.Int4),
    oxm_fields.NiciraExtended1('reg2', 2, type_desc.Int4),
    oxm_fields.NiciraExtended1('reg3', 3, type_desc.Int4),
    oxm_fields.NiciraExtended1('reg4', 4, type_desc.Int4),
    oxm_fields.NiciraExtended1('reg5', 5, type_desc.Int4),
    oxm_fields.NiciraExtended1('reg6', 6, type_desc.Int4),
    oxm_fields.NiciraExtended1('reg7', 7, type_desc.Int4),
]
| apache-2.0 |
gri-is/lodjob | crom_scripts/datasets/knoedler/row_document.py | 1 | 2739 | from cromulent.model import *
from cromulent.vocab import *
from utils.aat_labels import aat_labels
from utils.aat_label_fetcher import get_or_fetch
from utils.data_parsing import find_values
from utils.crom_helpers import props, toJSON, toString, printString, printAttr,\
type_maker, datetime_maker, date_maker
# Factory producing crom ``Type`` instances for AAT concept ids; labels are
# resolved via get_or_fetch against the bundled aat_labels table.
make_type = type_maker(
    vocab='aat:', getter=get_or_fetch, labels=aat_labels, Class=Type)
def cromify(record):
    """Convert one Knoedler stock-book row into a crom LinguisticObject.

    Builds a LinguisticObject for the row's record text, attaches its
    STAR / Provenance-Index / row-number identifiers, links the containing
    page document, converts the row's free-text fields into classified
    sub-texts, and references the related object and sale/purchase/inventory
    events.

    NOTE(review): repeated assignments such as ``o.identified_by = ...``
    appear to rely on cromulent treating multi-valued properties as
    append-on-set rather than overwrite -- confirm against the cromulent
    model before refactoring.
    """
    o = LinguisticObject(ident=record.pi_record_no)  # or no ident?
    o.label = 'Record Text'
    o.classified_as = make_type(300027146)  # or 300026685
    # STAR record number.
    star_record_no = Identifier()
    star_record_no.label = 'star_record_no'
    star_record_no.value = record.star_record_no
    o.identified_by = star_record_no
    # Provenance Index record number.
    pi_record_no = Identifier()
    pi_record_no.label = 'pi_record_no'
    pi_record_no.value = record.pi_record_no
    o.identified_by = pi_record_no
    # Row number within the stock-book page.
    row_number = Identifier()
    row_number.label = 'row_number'
    row_number.value = record.row_number
    o.identified_by = row_number
    # Does this connection need to be made on the page side to reduce redundancy?
    page_id = '%s-%s' % (record.stock_book_no, record.page_number)
    page = LinguisticObject(ident=page_id)
    #page.value = record.page_number
    #page.classified_as = make_type(300200294) # pagination
    o.referred_to_by = page
    # Page Dimensions
    # http://linked.art/model/document/index.html#page-dimensions
    # (not used)
    # Creation
    # http://linked.art/model/document/index.html#creation-and-publication
    # (not used)
    # Free-text row fields and the AAT concept each is classified under:
    fields_and_ids = [
        ('description', 300027200),
        ('subject', 300404126),
        ('genre', 300056462),
        ('object_type', 300191790),
        ('materials', 300010358),
        ('dimensions', 300266036),
        ('working_note', 300027200),
        ('verbatim_notes', 300027200),
    ]
    for field, aat_id in fields_and_ids:
        text = getattr(record, field)
        # Empty / missing fields are silently skipped.
        if text:
            lo = LinguisticObject()
            lo.label = text
            lo.classified_as = make_type(aat_id)  # notes
            o.part = lo
    # Link the physical object the row describes.
    mmo = ManMadeObject(ident=record.object_id)
    o.refers_to = mmo
    if record.sale_event_id:
        sale = Acquisition(ident=record.sale_event_id)
        o.refers_to = sale
    # A row references a purchase OR an inventory event, never both;
    # purchase takes precedence when both ids are present.
    if record.purchase_event_id:
        purchase = Acquisition(ident=record.purchase_event_id)
        o.refers_to = purchase
    elif record.inventory_event_id:
        inventory = Activity(ident=record.inventory_event_id)
        o.refers_to = inventory
    #printString(o)
    return o
| agpl-3.0 |
drjeep/django | django/contrib/auth/mixins.py | 305 | 4087 | from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.utils import six
from django.utils.encoding import force_text
class AccessMixin(object):
    """
    Abstract CBV mixin that gives access mixins the same customizable
    functionality.
    """
    login_url = None
    permission_denied_message = ''
    raise_exception = False
    redirect_field_name = REDIRECT_FIELD_NAME

    def get_login_url(self):
        """
        Return the login URL, preferring the per-view attribute over the
        LOGIN_URL setting.  Override to customize the login_url attribute.
        """
        url = self.login_url or settings.LOGIN_URL
        if url:
            return force_text(url)
        raise ImproperlyConfigured(
            '{0} is missing the login_url attribute. Define {0}.login_url, settings.LOGIN_URL, or override '
            '{0}.get_login_url().'.format(self.__class__.__name__)
        )

    def get_permission_denied_message(self):
        """
        Hook for customizing the permission_denied_message attribute.
        """
        return self.permission_denied_message

    def get_redirect_field_name(self):
        """
        Hook for customizing the redirect_field_name attribute.
        """
        return self.redirect_field_name

    def handle_no_permission(self):
        """Either raise PermissionDenied or redirect to the login page."""
        if self.raise_exception:
            raise PermissionDenied(self.get_permission_denied_message())
        return redirect_to_login(
            self.request.get_full_path(),
            self.get_login_url(),
            self.get_redirect_field_name(),
        )
class LoginRequiredMixin(AccessMixin):
    """
    CBV mixin which verifies that the current user is authenticated.
    """

    def dispatch(self, request, *args, **kwargs):
        # Authenticated users proceed normally; everyone else gets the
        # standard AccessMixin denial handling.
        if request.user.is_authenticated():
            return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
        return self.handle_no_permission()
class PermissionRequiredMixin(AccessMixin):
    """
    CBV mixin which verifies that the current user has all specified
    permissions.
    """
    permission_required = None

    def get_permission_required(self):
        """
        Return the required permissions as an iterable.  A single
        permission string is wrapped in a tuple.  Override to customize
        the permission_required attribute.
        """
        if self.permission_required is None:
            raise ImproperlyConfigured(
                '{0} is missing the permission_required attribute. Define {0}.permission_required, or override '
                '{0}.get_permission_required().'.format(self.__class__.__name__)
            )
        if isinstance(self.permission_required, six.string_types):
            return (self.permission_required, )
        return self.permission_required

    def has_permission(self):
        """
        Override this method to customize the way permissions are checked.
        """
        return self.request.user.has_perms(self.get_permission_required())

    def dispatch(self, request, *args, **kwargs):
        if self.has_permission():
            return super(PermissionRequiredMixin, self).dispatch(request, *args, **kwargs)
        return self.handle_no_permission()
class UserPassesTestMixin(AccessMixin):
    """
    CBV Mixin that allows you to define a test function which must return True
    if the current user can access the view.
    """

    def test_func(self):
        # Subclasses must supply the actual access test.
        raise NotImplementedError(
            '{0} is missing the implementation of the test_func() method.'.format(self.__class__.__name__)
        )

    def get_test_func(self):
        """
        Hook for supplying a different test callable.
        """
        return self.test_func

    def dispatch(self, request, *args, **kwargs):
        if self.get_test_func()():
            return super(UserPassesTestMixin, self).dispatch(request, *args, **kwargs)
        return self.handle_no_permission()
| bsd-3-clause |
adamjmcgrath/glancydesign | django/contrib/webdesign/templatetags/webdesign.py | 350 | 2196 | from django.contrib.webdesign.lorem_ipsum import words, paragraphs
from django import template
register = template.Library()
class LoremNode(template.Node):
    """Template node emitting lorem-ipsum words or paragraphs."""

    def __init__(self, count, method, common):
        self.count = count
        self.method = method
        self.common = common

    def render(self, context):
        try:
            amount = int(self.count.resolve(context))
        except (ValueError, TypeError):
            # Unresolvable or non-numeric counts fall back to a single item.
            amount = 1
        if self.method == 'w':
            return words(amount, common=self.common)
        paras = paragraphs(amount, common=self.common)
        if self.method == 'p':
            paras = ['<p>%s</p>' % p for p in paras]
        return u'\n\n'.join(paras)
#@register.tag
def lorem(parser, token):
    """
    Creates random Latin text useful for providing test data in templates.

    Usage format::

        {% lorem [count] [method] [random] %}

    ``count`` is a number (or variable) containing the number of paragraphs or
    words to generate (default is 1).

    ``method`` is either ``w`` for words, ``p`` for HTML paragraphs, ``b`` for
    plain-text paragraph blocks (default is ``b``).

    ``random`` is the word ``random``, which if given, does not use the common
    paragraph (starting "Lorem ipsum dolor sit amet, consectetuer...").

    Examples:

    * ``{% lorem %}`` will output the common "lorem ipsum" paragraph
    * ``{% lorem 3 p %}`` will output the common "lorem ipsum" paragraph
      and two random paragraphs each wrapped in HTML ``<p>`` tags
    * ``{% lorem 2 w random %}`` will output two random latin words
    """
    bits = list(token.split_contents())
    tagname = bits[0]
    # All three arguments are optional and positional, so they are consumed
    # right-to-left: trailing 'random' flag, then method letter, then count.
    # Random bit
    common = bits[-1] != 'random'
    if not common:
        bits.pop()
    # Method bit
    if bits[-1] in ('w', 'p', 'b'):
        method = bits.pop()
    else:
        method = 'b'
    # Count bit
    if len(bits) > 1:
        count = bits.pop()
    else:
        count = '1'
    count = parser.compile_filter(count)
    # Only the tag name itself may remain once all options are consumed.
    if len(bits) != 1:
        raise template.TemplateSyntaxError("Incorrect format for %r tag" % tagname)
    return LoremNode(count, method, common)
lorem = register.tag(lorem)
| bsd-3-clause |
dhp-denero/LibrERP | web_client/ea_web-github/doc/source/conf.py | 10 | 8260 | # -*- coding: utf-8 -*-
#
# OpenERP Web documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 18 16:31:55 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../addons'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage' ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenERP Web'
copyright = u'2011, OpenERP S.A.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '6.1'
# The full version, including alpha/beta/rc tags.
release = '6.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenERPWebdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'OpenERPWeb.tex', u'OpenERP Web Documentation',
u'OpenERP S.A.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'openerpweb', u'OpenERP Web Documentation',
[u'OpenERP S.A.'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'OpenERP Web'
epub_author = u'OpenERP S.A.'
epub_publisher = u'OpenERP S.A.'
epub_copyright = u'2011, OpenERP S.A.'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| agpl-3.0 |
mrcaps/wikimedia-analysis | scrape.py | 1 | 10214 | """
Scrape things:
* a set of Ganglia pages for underlying raw data.
* a set of bugs for parsing out gerrit links
"""
import urllib
try:
from urllib2 import Request, urlopen, URLError
except:
from urllib.request import Request, urlopen
from urllib.error import URLError
import sys
import unittest
import os
import time
import random
import re
import csv
import traceback
#require: easy_install beautifulsoup4
import bs4
from bs4 import BeautifulSoup
#require: easy_install xmltodict
import xmltodict
import json
import logging as log
log.basicConfig(level=log.INFO)
def getpage(loc, data=None, header_override=None):
    """Open ``loc`` with browser-like headers and return the response.

    Args:
        loc: URL to fetch.
        data: optional request body (a body makes the request a POST).
        header_override: optional dict of headers merged over the defaults.

    Returns:
        The file-like response object, or None if the fetch failed.
    """
    #data = urllib.urlencode({"k": "v"})
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.3",
        "Accept-Encoding": "none",
        "Accept-Language": "en-US,en;q=0.8",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive"}
    if header_override is not None:
        for (k, v) in header_override.items():
            headers[k] = v
    req = Request(loc, data, headers)
    try:
        return urlopen(req)
    except URLError as e:
        # Bug fix: the original ``except (URLError, e):`` evaluated the
        # undefined name ``e`` while building the handler tuple, raising
        # NameError at exception time instead of handling the URLError
        # (a broken ``except URLError, e:`` py2->py3 conversion).
        log.error("Couldn't open URL %s" % loc)
        log.error(e)
        return None
def readpage(loc):
    """Fetch ``loc`` and return the response body, or None on failure."""
    response = getpage(loc)
    if response is None:
        return None
    return response.read()
def readstatic(loc):
    """Return the full contents of the local file at ``loc``."""
    with open(loc, "r") as handle:
        contents = handle.read()
    return contents
class RequestTest(unittest.TestCase):
    """Smoke tests for the fetch helpers and the Downloader parsers.

    NOTE(review): test_req performs a live HTTP request, and the other
    tests read fixture files from testdata/, so this suite only passes
    with network access and the fixtures checked out.
    """

    def test_req(self):
        pth = "http://www.mrcaps.com"
        #pth = "http://ganglia.wikimedia.org/latest/?t=yes&m=cpu_report&r=year&s=by%20name&hc=4&mc=2"
        self.assertTrue(len(readpage(pth)) > 1000)

    def test_readstatic(self):
        self.assertTrue(len(readstatic("testdata/ganglia-home.html")) > 1000)

    def test_downloader(self):
        # Only checks the parsers do not raise on the fixtures; no value
        # assertions are made.
        down = Downloader()
        sources = down.extract_sources(readstatic("testdata/ganglia-home.html"))
        hosts = down.extract_nodes(readstatic("testdata/ganglia-nodes.html"))
        csvs = down.extract_csvs(readstatic("testdata/ganglia-node.html"))
def try_makedirs(pth):
    """Create directory ``pth`` (and parents), ignoring creation failures.

    Callers only need the directory to exist afterwards, so an
    already-existing path is fine.
    """
    try:
        os.makedirs(pth)
    except OSError:
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit and programming errors; only
        # OS-level failures (typically EEXIST) are expected here.
        pass
def readpage_cache(loc, writeto, usecache, dlsleep=True):
    """Fetch ``loc``, caching the body at path ``writeto``.

    Args:
        loc: URL to fetch.
        writeto: local cache file path; parent dirs are created as needed.
        usecache: when True and ``writeto`` exists, return the cached copy
            without touching the network.
        dlsleep: sleep a random amount (<1s) before downloading, to be
            polite to the remote server.

    Returns:
        The page content, or None if the download failed.

    NOTE(review): the cache file is opened in text mode ("w") while
    readpage returns bytes under Python 3 -- this write would raise
    TypeError there.  Confirm this module is run under Python 2 or
    switch to "wb".
    """
    if os.path.exists(writeto) and usecache:
        return readstatic(writeto)
    else:
        log.info("Downloading " + loc)
        try_makedirs(os.path.split(writeto)[0])
        if dlsleep:
            time.sleep(random.random())
        cont = readpage(loc)
        # A failed download is not cached, so it will be retried next call.
        if cont is not None:
            with open(writeto, "w") as fp:
                fp.write(cont)
        return cont
class Downloader(object):
    """Scrapes a Ganglia web UI: sources -> nodes -> per-metric CSV data.

    All pages are fetched through readpage_cache, so a completed run can
    be repeated cheaply with usecache=True.
    """

    def __init__(self):
        pass

    def run(self, baseurl, outdir, usecache=True):
        """Walk the whole Ganglia site under ``baseurl`` into ``outdir``.

        Layout produced: outdir/index.html, outdir/<src>/index.html,
        outdir/<src>/<node>.html and outdir/<src>/<node>/<metric>.csv.
        """
        def getloc(*parts):
            # Build a path under outdir from the given components.
            return os.path.join(*([outdir] + list(parts)))
        rootcont = readpage_cache(baseurl, getloc("index.html"), usecache)
        srcs = self.extract_sources(rootcont)
        for src in srcs:
            srccont = readpage_cache(
                self.get_source_url(baseurl, src),
                getloc(src, "index.html"),
                usecache)
            for node in self.extract_nodes(srccont):
                nodecont = readpage_cache(
                    self.get_node_url(baseurl, src, node),
                    getloc(src, node + ".html"),
                    usecache)
                for csvp in self.extract_csvs(nodecont):
                    # Metric name is taken from the "m=" query parameter.
                    l = csvp.find("m=")
                    if l < 0:
                        # NOTE(review): execution continues with l == -1
                        # here, so mname is sliced from index 1 of the URL
                        # -- probably this should skip the entry instead.
                        log.warn("Could not find metric in plot csv url")
                    mname = csvp[l+2:]
                    mname = mname[:mname.find("&")]
                    csvc = readpage_cache(
                        baseurl + csvp,
                        getloc(src, node, mname + ".csv"),
                        usecache)

    def parse_tree(self, treecont):
        """Map link text -> href for the second table on the page.

        Fragile; don't use me.

        Args:
            treecont: content of ganglia tree
        """
        soup = BeautifulSoup(treecont)
        #fragile!
        links = soup.find_all("table")[1].find_all("a")
        dct = dict()
        for lnk in links:
            dct[lnk.contents[0]] = lnk["href"]
        return dct

    def get_source_url(self, baseurl, source, timerange="year"):
        """URL of a source (cluster) overview page."""
        return "%s/?c=%s&r=%s" % (baseurl, source, timerange)

    def get_node_url(self, baseurl, source, node, timerange="year"):
        """URL of a single node's page within a source."""
        return "%s/?c=%s&h=%s&r=%s" % (baseurl, source, node, timerange)

    def extract_sources(self, pagecont):
        """Get list of sources from the given page content.

        Reads the non-empty values of the <select name="c"> dropdown.
        """
        soup = BeautifulSoup(pagecont)
        sources = soup.find_all("select", attrs={"name": "c"})[0]
        srcvals = []
        for src in sources:
            # Children include NavigableStrings; only real tags have values.
            if type(src) == bs4.element.Tag:
                v = src["value"]
                if v != "":
                    srcvals.append(v)
        return srcvals

    def extract_nodes(self, pagecont):
        """Get node names from the given page content.

        Reads the non-empty values of the <select name="h"> dropdown.
        """
        soup = BeautifulSoup(pagecont)
        hosts = soup.find_all("select", attrs={"name": "h"})[0]
        hostvals = []
        for host in hosts:
            if type(host) == bs4.element.Tag:
                v = host["value"]
                if v != "":
                    hostvals.append(v)
        return hostvals

    def extract_csvs(self, pagecont):
        """Get CSV data links from the given page content.

        The links live inside the onclick handler of each
        "Export to CSV" button, quoted between single quotes.
        """
        soup = BeautifulSoup(pagecont)
        csvs = soup.find_all("button", attrs={"title": "Export to CSV"})
        hrefs = []
        for lnk in csvs:
            ocl = lnk["onclick"]
            ocl = ocl[ocl.find("'")+1:ocl.rfind("'")]
            # Normalize "./path" to "path" so it can be joined to baseurl.
            if ocl.startswith("./"):
                ocl = ocl[2:]
            hrefs.append(ocl)
        return hrefs

    def extract_metrics(self, pagecont):
        """Get monitoring metrics from the given page content.

        NOTE(review): unlike extract_sources/extract_nodes this does not
        filter to bs4 Tag children before indexing ["value"], so a
        NavigableString child would raise -- confirm against a real page.
        """
        soup = BeautifulSoup(pagecont)
        #fragile!
        metrics = soup.find_all(id="metrics-picker")[0]
        metvals = []
        for met in metrics:
            metvals.append(met["value"])
        return metvals
def run_downloader():
    """Drive a full Ganglia scrape into the local ``data`` directory."""
    log.info("start download")
    Downloader().run("http://ganglia.wikimedia.org/latest/", "data")
    log.info("done download")
class BugScraper():
    """Scrapes Bugzilla bugs and correlates them with gerrit changes."""

    def __init__(self):
        pass

    def run(self, bugzilla_loc, first_bug=1, last_bug=52535, outdir="bugs", outfile="bugs.json"):
        """Grab list of bugs from bugzilla, dump to outfile.

        Each bug is fetched as XML, converted to a dict and cached as
        outdir/<id>.json; a second pass merges the cache into outfile.

        Args:
            bugzilla_loc: location of bugzilla main, including trailing forwardslash
            first_bug: first bug id (inclusive)
            last_bug: last bug id (exclusive)
            outdir: directory for the per-bug JSON cache
            outfile: path of the merged JSON output
        """
        try:
            os.makedirs(outdir)
        except:
            # NOTE(review): bare except; presumably guarding EEXIST only.
            pass
        def get_bug_path(bid):
            # Cache file for a single bug id.
            return os.path.join(outdir, "%s.json" % (bid))
        for bug in range(first_bug, last_bug):
            log.info("Grab bug %d" % bug)
            bout = get_bug_path(bug)
            # Already-cached bugs are not re-downloaded.
            if not os.path.exists(bout):
                try:
                    dlpage = "%sshow_bug.cgi?ctype=xml&id=%s" % (bugzilla_loc, bug)
                    dct = xmltodict.parse(getpage(dlpage).read())
                    #write out to separate dir
                    with open(bout, "w") as fp:
                        json.dump(dct, fp)
                except:
                    # NOTE(review): bare except also hides KeyboardInterrupt.
                    log.error("Couldn't download bug %d" % bug)
                    traceback.print_exc()
                # Brief jitter between requests to be polite to the server.
                time.sleep(random.random() * 0.1)
        # Second pass: merge the per-bug cache into a single JSON file.
        bugs = dict()
        for bug in range(first_bug, last_bug):
            log.info("Collect bug %d" % bug)
            bout = get_bug_path(bug)
            with open(bout, "r") as fp:
                bjs = json.load(fp)
                bugs[bug] = bjs["bugzilla"]["bug"]
        with open(outfile, "w") as fp:
            json.dump(bugs, fp, indent=4)

    def correlate_changeids(self, outdir="bugs/changeids", infile="bugs.json", outfile="bugs-commits.csv"):
        """For each bug in bugs, determine if it has a gerrit change.

        For those that do, find the gerrit change and get the associated
        commit.  Change details are cached under ``outdir`` and the
        (bug, revision-hash) pairs are written to ``outfile`` as CSV.
        """
        bugs = None
        with open(infile, "r") as fp:
            bugs = json.load(fp)
        try:
            os.makedirs(outdir)
        except:
            # NOTE(review): bare except; presumably guarding EEXIST only.
            pass
        def get_out_path(bid):
            # Cache file for a bug's gerrit change detail.
            return os.path.join(outdir, "%s.json" % (bid))
        gerrit_url = "https://gerrit.wikimedia.org/"
        # Matches gerrit change links of the form .../r/#/c/<number>/.
        pat = re.compile(gerrit_url + "r/#/c/(\\d+)/")
        def get_gerrit_change_detail(cid):
            # Public REST detail endpoint (currently unused below).
            url = "%sr/changes/%d/detail" % (gerrit_url, cid)
            detail = getpage(url).read()
            #)]}' at the beginning of the change
            CRUFT_LENGTH = 4
            return json.loads(detail[4:])
        def get_gerrit_change_detail_service(cid):
            """Get UI change detail for the given change id

            This isn't really guaranteed to keep working, but gives revision hashes.
            (in .result.patchSets[n].revision.id)
            """
            url = "%sr/gerrit_ui/rpc/ChangeDetailService" % (gerrit_url)
            data = {
                "id": 1,
                "jsonrpc": "2.0",
                "method": "changeDetail",
                "params": [{
                    "id": cid
                }]
            }
            data_encoded = bytes(json.dumps(data), "utf-8")
            headers = {
                "Accept": "application/json,application/json,application/jsonrequest",
                "Content-Type": "application/json; charset=UTF-8",
                "Content-Length": len(data_encoded)
            }
            detail = getpage(url, data_encoded, headers).read()
            jsr = json.loads(detail.decode())
            if "result" in jsr:
                return jsr["result"]
            else:
                return None
        collectable = []
        for (bugid, bug) in bugs.items():
            if "long_desc" in bug:
                # Normalize single-comment bugs to a one-element list.
                if isinstance(bug["long_desc"], dict):
                    bug["long_desc"] = [bug["long_desc"]]
                for desc in bug["long_desc"]:
                    if "thetext" in desc and desc["thetext"] is not None:
                        matches = pat.finditer(desc["thetext"])
                        for match in matches:
                            changeno = int(match.group(1))
                            #Gerrit detail json like:
                            # https://gerrit.wikimedia.org/r/changes/67311/detail
                            #where 67311 is the change id.
                            try:
                                outpath = get_out_path(bugid)
                                if not os.path.exists(outpath):
                                    log.info("Collect change id %s" % bugid)
                                    cont = get_gerrit_change_detail_service(changeno)
                                    if cont is None:
                                        continue
                                    with open(outpath, "w") as fp:
                                        json.dump(cont, fp)
                                collectable.append((bugid, outpath))
                            except:
                                # NOTE(review): bare except hides real errors.
                                log.error("Couldn't collect change id %s" % bugid)
                                traceback.print_exc()
        # Write out one CSV row per (bug, patch-set revision hash).
        with open(outfile, "wt") as fp:
            writer = csv.writer(fp)
            writer.writerow(["bug", "revhash"])
            for (bugid, idpath) in collectable:
                with open(idpath, "r") as fp:
                    js = json.load(fp)
                    for ps in js["patchSets"]:
                        writer.writerow([bugid, ps["revision"]["id"].strip()])
def run_bugscraper():
    """Correlate previously-scraped bugs with their gerrit change ids."""
    scraper = BugScraper()
    log.info("start bug scrape")
    #scraper.run("https://bugzilla.wikimedia.org/")
    log.info("done bug scrape")
    log.info("start correlate change ids")
    scraper.correlate_changeids()
    log.info("done correlate change ids")
if __name__ == "__main__":
#unittest.main()
if len(sys.argv) <= 1:
print("Usage: scrape.py [action] where [action] one of:")
print(" download: download time series data")
print(" bugscrape: scrape bugs")
sys.exit(1)
cmd = sys.argv[1]
if cmd == "download":
run_downloader()
elif cmd == "bugscrape":
run_bugscraper() | bsd-3-clause |
eeshangarg/oh-mainline | vendor/packages/gdata/src/gdata/contentforshopping/client.py | 63 | 19053 | #!/usr/bin/python
#
# Copyright (C) 2010-2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extend the gdata client for the Content API for Shopping.
TODO:
1. Proper MCA Support.
2. Add classes for datafeed functions instead of asking for raw XML.
"""
__author__ = 'afshar (Ali Afshar), dhermes (Daniel Hermes)'
import atom.data
import gdata.client
from gdata.contentforshopping.data import ClientAccount
from gdata.contentforshopping.data import ClientAccountFeed
from gdata.contentforshopping.data import DatafeedEntry
from gdata.contentforshopping.data import DatafeedFeed
from gdata.contentforshopping.data import ProductEntry
from gdata.contentforshopping.data import ProductFeed
CFS_VERSION = 'v1'
CFS_HOST = 'content.googleapis.com'
CFS_URI = 'https://%s/content' % CFS_HOST
CFS_PROJECTION = 'generic'
class ContentForShoppingClient(gdata.client.GDClient):
"""Client for Content for Shopping API.
:param account_id: Merchant account ID. This value will be used by default
for all requests, but may be overridden on a
request-by-request basis.
:param api_version: The version of the API to target. Default value: 'v1'.
:param **kwargs: Pass all addtional keywords to the GDClient constructor.
"""
api_version = '1.0'
    def __init__(self, account_id=None, api_version=CFS_VERSION, **kwargs):
        # Default Merchant Center account id, used by _create_uri when a
        # request does not pass one explicitly.
        self.cfs_account_id = account_id
        self.cfs_api_version = api_version
        # Remaining keyword arguments are forwarded to the base GDClient.
        gdata.client.GDClient.__init__(self, **kwargs)
def _create_uri(self, account_id, resource, path=(), use_projection=True,
dry_run=False, warnings=False):
"""Create a request uri from the given arguments.
If arguments are None, use the default client attributes.
"""
account_id = account_id or self.cfs_account_id
if account_id is None:
raise ValueError('No Account ID set. '
'Either set for the client, or per request')
segments = [CFS_URI, self.cfs_api_version, account_id, resource]
if use_projection:
segments.append(CFS_PROJECTION)
segments.extend(path)
result = '/'.join(segments)
request_params = []
if dry_run:
request_params.append('dry-run')
if warnings:
request_params.append('warnings')
request_params = '&'.join(request_params)
if request_params:
result = '%s?%s' % (result, request_params)
return result
def _create_product_id(self, id, country, language):
return 'online:%s:%s:%s' % (language, country, id)
def _create_batch_feed(self, entries, operation, feed=None):
if feed is None:
feed = ProductFeed()
for entry in entries:
entry.batch_operation = gdata.data.BatchOperation(type=operation)
feed.entry.append(entry)
return feed
# Operations on a single product
def get_product(self, id, country, language, account_id=None,
auth_token=None):
"""Get a product by id, country and language.
:param id: The product ID
:param country: The country (target_country)
:param language: The language (content_language)
:param account_id: The Merchant Center Account ID. If ommitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
pid = self._create_product_id(id, country, language)
uri = self._create_uri(account_id, 'items/products', path=[pid])
return self.get_entry(uri, desired_class=ProductEntry,
auth_token=auth_token)
def insert_product(self, product, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Create a new product, by posting the product entry feed.
:param product: A :class:`gdata.contentforshopping.data.ProductEntry` with
the required product data.
:param account_id: The Merchant Center Account ID. If ommitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'items/products',
dry_run=dry_run, warnings=warnings)
return self.post(product, uri=uri, auth_token=auth_token)
def update_product(self, product, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Update a product, by putting the product entry feed.
:param product: A :class:`gdata.contentforshopping.data.ProductEntry` with
the required product data.
:param account_id: The Merchant Center Account ID. If ommitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False
by default.
"""
pid = self._create_product_id(product.product_id.text,
product.target_country.text,
product.content_language.text)
uri = self._create_uri(account_id, 'items/products', path=[pid],
dry_run=dry_run, warnings=warnings)
return self.update(product, uri=uri, auth_token=auth_token)
def delete_product(self, product, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Delete a product
:param product: A :class:`gdata.contentforshopping.data.ProductEntry` with
the required product data.
:param account_id: The Merchant Center Account ID. If ommitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
pid = self._create_product_id(product.product_id.text,
product.target_country.text,
product.content_language.text)
uri = self._create_uri(account_id, 'items/products', path=[pid],
dry_run=dry_run, warnings=warnings)
return self.delete(uri, auth_token=auth_token)
# Operations on multiple products
def get_products(self, start_index=None, max_results=None, account_id=None,
auth_token=None):
"""Get a feed of products for the account.
:param max_results: The maximum number of results to return (default 25,
maximum 250).
:param start_index: The starting index of the feed to return (default 1,
maximum 10000)
:param account_id: The Merchant Center Account ID. If ommitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'items/products')
return self.get_feed(uri, auth_token=auth_token,
desired_class=gdata.contentforshopping.data.ProductFeed)
def batch(self, feed, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Send a batch request.
:param feed: The feed of batch entries to send.
:param account_id: The Merchant Center Account ID. If ommitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'items/products', path=['batch'],
dry_run=dry_run, warnings=warnings)
return self.post(feed, uri=uri, auth_token=auth_token,
desired_class=ProductFeed)
def insert_products(self, products, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Insert the products using a batch request
:param products: A list of product entries
:param account_id: The Merchant Center Account ID. If ommitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
feed = self._create_batch_feed(products, 'insert')
return self.batch(feed)
def update_products(self, products, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Update the products using a batch request
:param products: A list of product entries
:param account_id: The Merchant Center Account ID. If ommitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
.. note:: Entries must have the atom:id element set.
"""
feed = self._create_batch_feed(products, 'update')
return self.batch(feed)
def delete_products(self, products, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Delete the products using a batch request.
:param products: A list of product entries
:param account_id: The Merchant Center Account ID. If ommitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
.. note:: Entries must have the atom:id element set.
"""
feed = self._create_batch_feed(products, 'delete')
return self.batch(feed)
# Operations on datafeeds
def get_datafeeds(self, account_id=None):
"""Get the feed of datafeeds.
:param account_id: The Sub-Account ID. If ommitted the default
Account ID will be used for this client.
"""
uri = self._create_uri(account_id, 'datafeeds/products',
use_projection=False)
return self.get_feed(uri, desired_class=DatafeedFeed)
# Operations on a single datafeed
def get_datafeed(self, feed_id, account_id=None, auth_token=None):
"""Get the feed of a single datafeed.
:param feed_id: The ID of the desired datafeed.
:param account_id: The Sub-Account ID. If ommitted the default
Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'datafeeds/products', path=[feed_id],
use_projection=False)
return self.get_feed(uri, auth_token=auth_token,
desired_class=DatafeedEntry)
def insert_datafeed(self, entry, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Insert a datafeed.
:param entry: XML Content of post request required for registering a
datafeed.
:param account_id: The Sub-Account ID. If ommitted the default
Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'datafeeds/products',
use_projection=False, dry_run=dry_run,
warnings=warnings)
return self.post(entry, uri=uri, auth_token=auth_token)
def update_datafeed(self, entry, feed_id, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Update the feed of a single datafeed.
:param entry: XML Content of put request required for updating a
datafeed.
:param feed_id: The ID of the desired datafeed.
:param account_id: The Sub-Account ID. If ommitted the default
Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'datafeeds/products', path=[feed_id],
use_projection=False, dry_run=dry_run,
warnings=warnings)
return self.update(entry, auth_token=auth_token, uri=uri)
def delete_datafeed(self, feed_id, account_id=None, auth_token=None):
"""Delete a single datafeed.
:param feed_id: The ID of the desired datafeed.
:param account_id: The Sub-Account ID. If ommitted the default
Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'datafeeds/products', path=[feed_id],
use_projection=False)
return self.delete(uri, auth_token=auth_token)
# Operations on client accounts
def get_client_accounts(self, account_id=None, auth_token=None):
"""Get the feed of managed accounts
:param account_id: The Merchant Center Account ID. If ommitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'managedaccounts/products',
use_projection=False)
return self.get_feed(uri, desired_class=ClientAccountFeed,
auth_token=auth_token)
def insert_client_account(self, entry, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Insert a client account entry
:param entry: An entry of type ClientAccount
:param account_id: The Merchant Center Account ID. If ommitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'managedaccounts/products',
use_projection=False, dry_run=dry_run,
warnings=warnings)
return self.post(entry, uri=uri, auth_token=auth_token)
def update_client_account(self, entry, client_account_id, account_id=None,
auth_token=None, dry_run=False, warnings=False):
"""Update a client account
:param entry: An entry of type ClientAccount to update to
:param client_account_id: The client account ID
:param account_id: The Merchant Center Account ID. If ommitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'managedaccounts/products',
path=[client_account_id], use_projection=False,
dry_run=dry_run, warnings=warnings)
return self.update(entry, uri=uri, auth_token=auth_token)
def delete_client_account(self, client_account_id, account_id=None,
auth_token=None, dry_run=False, warnings=False):
"""Delete a client account
:param client_account_id: The client account ID
:param account_id: The Merchant Center Account ID. If ommitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'managedaccounts/products',
path=[client_account_id], use_projection=False,
dry_run=dry_run, warnings=warnings)
return self.delete(uri, auth_token=auth_token)
| agpl-3.0 |
jayk/linux | tools/perf/python/twatch.py | 219 | 1789 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    """Print a line for every task lifetime event on every CPU, forever."""
    # Monitor all online CPUs and all existing threads.
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    # A software "dummy" event has no counter cost; we only want the
    # side-band task/comm records, with PID/TID and CPU in each sample.
    evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
                       config = perf.COUNT_SW_DUMMY,
                       task = 1, comm = 1, mmap = 0, freq = 0,
                       wakeup_events = 1, watermark = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
    """What we want are just the PERF_RECORD_ lifetime events for threads,
    using the default, PERF_TYPE_HARDWARE + PERF_COUNT_HW_CYCLES & freq=1
    (the default), makes perf reenable irq_vectors:local_timer_entry, when
    disabling nohz, not good for some use cases where all we want is to get
    threads comes and goes... So use (perf.TYPE_SOFTWARE, perf_COUNT_SW_DUMMY,
    freq=0) instead."""
    evsel.open(cpus = cpus, threads = threads);
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    # Block until at least one ring buffer has data, then drain each CPU.
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            # NOTE(review): Python 2 print statements -- this script will
            # not run under Python 3 as written.
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event
if __name__ == '__main__':
main()
| gpl-2.0 |
SOKP/external_chromium_org | tools/site_compare/scrapers/firefox/firefox2.py | 189 | 6725 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Does scraping for Firefox 2.0."""
import pywintypes
import time
import types
from drivers import keyboard
from drivers import mouse
from drivers import windowing
# Default version
version = "2.0.0.6"
DEFAULT_PATH = r"c:\program files\mozilla firefox\firefox.exe"
# TODO(jhaas): the Firefox scraper is a bit rickety at the moment. Known
# issues: 1) won't work if the default profile puts toolbars in different
# locations, 2) uses sleep() statements rather than more robust checks,
# 3) fails badly if an existing Firefox window is open when the scrape
# is invoked. This needs to be fortified at some point.
def GetBrowser(path):
  """Invoke the Firefox browser and return the process and window.

  Args:
    path: full path to browser

  Returns:
    A tuple of (process handle, main window, render pane)
  """
  if not path: path = DEFAULT_PATH
  # Invoke Firefox
  (proc, wnd) = windowing.InvokeAndWait(path)
  # Get the content pane by walking the nested Mozilla window classes.
  render_pane = windowing.FindChildWindow(
    wnd,
    "MozillaWindowClass/MozillaWindowClass/MozillaWindowClass")
  return (proc, wnd, render_pane)
def InvokeBrowser(path):
  """Invoke the Firefox browser.

  Args:
    path: full path to browser

  Returns:
    A tuple of (main window, process handle, render pane).  The process
    handle is None when an already-running Firefox window was reused.
  """
  # Reuse an existing instance of the browser if we can find one. This
  # may not work correctly, especially if the window is behind other windows.
  wnds = windowing.FindChildWindows(0, "MozillaUIWindowClass")
  if len(wnds):
    wnd = wnds[0]
    proc = None
  else:
    # Invoke Firefox
    (proc, wnd) = windowing.InvokeAndWait(path)
  # Get the content pane by walking the nested Mozilla window classes.
  render_pane = windowing.FindChildWindow(
    wnd,
    "MozillaWindowClass/MozillaWindowClass/MozillaWindowClass")
  return (wnd, proc, render_pane)
def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
  """Invoke a browser, send it to a series of URLs, and save its output.

  Args:
    urls: list of URLs to scrape
    outdir: directory to place output
    size: size of browser window to use
    pos: position of browser window
    timeout: amount of time to wait for page to load
    kwargs: miscellaneous keyword args ("path" overrides the browser
      executable; "filename" is an output name or a callable taking the URL)

  Returns:
    None if success, else an error string
  """
  if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
  else: path = DEFAULT_PATH
  (wnd, proc, render_pane) = InvokeBrowser(path)
  # Resize and reposition the frame
  windowing.MoveAndSizeWindow(wnd, pos, size, render_pane)
  time.sleep(3)
  # Firefox is a bit of a pain: it doesn't use standard edit controls,
  # and it doesn't display a throbber when there's no tab. Let's make
  # sure there's at least one tab, then select the first one.
  # NOTE(review): "[t]" presumably encodes Ctrl+T in the keyboard driver's
  # bracket syntax -- verify against drivers/keyboard.
  mouse.ClickInWindow(wnd)
  keyboard.TypeString("[t]", True)
  mouse.ClickInWindow(wnd, (30, 115))
  time.sleep(2)
  timedout = False
  # Visit each URL we're given
  if type(urls) in types.StringTypes: urls = [urls]
  for url in urls:
    # Use keyboard shortcuts ("{d}" presumably focuses the location bar)
    keyboard.TypeString("{d}", True)
    keyboard.TypeString(url)
    keyboard.TypeString("\n")
    # Wait for the page to finish loading; the rectangle is the screen
    # region where the tab throbber animates.
    load_time = windowing.WaitForThrobber(wnd, (10, 96, 26, 112), timeout)
    timedout = load_time < 0
    if timedout:
      break
    # Scrape the page
    image = windowing.ScrapeWindow(render_pane)
    # Save to disk
    if "filename" in kwargs:
      if callable(kwargs["filename"]):
        filename = kwargs["filename"](url)
      else:
        filename = kwargs["filename"]
    else:
      filename = windowing.URLtoFilename(url, outdir, ".bmp")
    image.save(filename)
  # Close all the tabs, cheesily, by sending close-tab until no window is left.
  mouse.ClickInWindow(wnd)
  while len(windowing.FindChildWindows(0, "MozillaUIWindowClass")):
    keyboard.TypeString("[w]", True)
    time.sleep(1)
  if timedout:
    return "timeout"
def Time(urls, size, timeout, **kwargs):
  """Measure how long it takes to load each of a series of URLs

  Args:
    urls: list of URLs to time
    size: size of browser window to use
    timeout: amount of time to wait for page to load
    kwargs: miscellaneous keyword args ("path" overrides the browser
      executable)

  Returns:
    A list of tuples (url, time). "time" can be "crashed" or "timeout"
  """
  if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
  else: path = DEFAULT_PATH
  proc = None
  # Visit each URL we're given
  if type(urls) in types.StringTypes: urls = [urls]
  ret = []
  for url in urls:
    try:
      # Invoke the browser if necessary.
      # NOTE(review): after a clean close below, `proc` stays non-None, so
      # the next URL does not relaunch the browser even though its windows
      # were closed -- looks suspect; confirm intended behavior.
      if not proc:
        (wnd, proc, render_pane) = InvokeBrowser(path)
        # Resize and reposition the frame
        windowing.MoveAndSizeWindow(wnd, (0,0), size, render_pane)
        time.sleep(3)
        # Firefox is a bit of a pain: it doesn't use standard edit controls,
        # and it doesn't display a throbber when there's no tab. Let's make
        # sure there's at least one tab, then select the first one
        mouse.ClickInWindow(wnd)
        keyboard.TypeString("[t]", True)
        mouse.ClickInWindow(wnd, (30, 115))
        time.sleep(2)
      # Use keyboard shortcuts
      keyboard.TypeString("{d}", True)
      keyboard.TypeString(url)
      keyboard.TypeString("\n")
      # Wait for the page to finish loading (throbber animation region)
      load_time = windowing.WaitForThrobber(wnd, (10, 96, 26, 112), timeout)
      timedout = load_time < 0
      if timedout:
        load_time = "timeout"
      # Try to close the browser; if this fails it's probably a crash
      mouse.ClickInWindow(wnd)
      count = 0
      while (len(windowing.FindChildWindows(0, "MozillaUIWindowClass"))
             and count < 5):
        keyboard.TypeString("[w]", True)
        time.sleep(1)
        count = count + 1
      if len(windowing.FindChildWindows(0, "MozillaUIWindowClass")):
        windowing.EndProcess(proc)
        load_time = "crashed"
        proc = None
    except pywintypes.error:
      # Any win32 error while driving the UI is treated as a browser crash.
      proc = None
      load_time = "crashed"
    ret.append( (url, load_time) )
  if proc:
    # Best-effort cleanup of any window still open after the last URL.
    count = 0
    while (len(windowing.FindChildWindows(0, "MozillaUIWindowClass"))
           and count < 5):
      keyboard.TypeString("[w]", True)
      time.sleep(1)
      count = count + 1
  return ret
def main():
  """Smoke test: scrape three known sites into a fixed output directory."""
  # We're being invoked rather than imported, so run some tests
  path = r"c:\sitecompare\scrapes\Firefox\2.0.0.6"
  windowing.PreparePath(path)
  # Scrape three sites and save the results
  Scrape(
    ["http://www.microsoft.com", "http://www.google.com",
     "http://www.sun.com"],
    path, (1024, 768), (0, 0))
  return 0
if __name__ == "__main__":
  # Fix: ``sys`` is never imported at module scope in this file, so
  # ``sys.exit`` raised NameError when the module was run as a script.
  # Import it locally; it is only needed on this path.
  import sys
  sys.exit(main())
| bsd-3-clause |
SidSachdev/SFrame | oss_src/unity/python/sframe/meta/bytecodetools/instruction.py | 31 | 2453 | '''
Created on May 10, 2012
@author: sean
'''
from __future__ import print_function
import opcode
import sys
# True when running under Python 3.
py3 = sys.version_info.major >= 3
# Indexing a bytecode string yields ints on py3 but 1-char strings on py2;
# co_ord normalizes either to the integer opcode value.
co_ord = (lambda c:c) if py3 else ord
class Instruction(object):
    '''
    A Python byte-code instruction.

    Wraps a single opcode together with its offset within the code
    object, its source line number and (once a disassembler pass decodes
    it) its argument.
    '''
    def __init__(self, i=-1, op=None, lineno=None):
        self.i = i                  # byte offset within the code object
        self.op = op                # numeric opcode
        self.lineno = lineno        # source line number, if known
        self.oparg = None           # raw numeric argument
        self.arg = None             # decoded argument (const, name, target, ...)
        self.extended_arg = 0       # accumulated EXTENDED_ARG prefix value
        self.linestart = False      # True if this opcode starts a source line
        # Fix: __str__ reads these two flags, but they were never
        # initialized, so printing an instruction that a disassembler had
        # not explicitly flagged raised AttributeError.
        self.lasti = False          # True if this is the "current" instruction
        self.label = False          # True if this offset is a jump target

    @property
    def opname(self):
        """Human-readable mnemonic for this opcode."""
        return opcode.opname[self.op]

    @property
    def is_jump(self):
        """True if this opcode is a relative or absolute jump."""
        return self.op in opcode.hasjrel or self.op in opcode.hasjabs

    @property
    def to(self):
        """Jump destination; raises for non-jump opcodes.

        Relative jumps store the decoded target in ``arg``; absolute
        jumps use the raw ``oparg``.
        """
        if self.op in opcode.hasjrel:
            return self.arg
        elif self.op in opcode.hasjabs:
            return self.oparg
        else:
            raise Exception("this is not a jump op (%s)" % (self.opname,))

    def __repr__(self):
        res = '<%s(%i)' % (opcode.opname[self.op], self.i,)
        if self.arg is not None:
            res += ' arg=%r' % (self.arg,)
        elif self.oparg is not None:
            res += ' oparg=%r' % (self.oparg,)
        return res + '>'

    def __str__(self):
        """Render one dis-style listing line for this instruction."""
        result = []
        if self.linestart:
            result.append("%3d" % self.lineno)
        else:
            result.append("   ")
        if self.lasti:
            result.append('-->')
        else:
            result.append('   ')
        if self.label:
            result.append('>>')
        else:
            result.append('  ')
        result.append(repr(self.i).rjust(4))
        result.append(opcode.opname[self.op].ljust(20))
        if self.op >= opcode.HAVE_ARGUMENT:
            result.append(repr(self.oparg).rjust(5))
            # Show the decoded argument in the conventional dis format.
            if self.op in opcode.hasconst:
                result.append('(' + repr(self.arg) + ')')
            elif self.op in opcode.hasname:
                result.append('(' + repr(self.arg) + ')')
            elif self.op in opcode.hasjrel:
                result.append('(to ' + repr(self.arg) + ')')
            elif self.op in opcode.haslocal:
                result.append('(' + repr(self.arg) + ')')
            elif self.op in opcode.hascompare:
                result.append('(' + repr(self.arg) + ')')
            elif self.op in opcode.hasfree:
                result.append('(' + repr(self.arg) + ')')
        return ' '.join(result)
| bsd-3-clause |
secretdataz/OpenKore-Src | src/scons-local-2.0.1/SCons/Tool/mwcc.py | 61 | 6894 | """SCons.Tool.mwcc
Tool-specific initialization for the Metrowerks CodeWarrior compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mwcc.py 5134 2010/08/16 23:02:40 bdeegan"
import os
import os.path
import SCons.Util
def set_vars(env):
    """Set MWCW_VERSION, MWCW_VERSIONS, and some codewarrior environment vars

    MWCW_VERSIONS is set to a list of objects representing installed versions

    MWCW_VERSION  is set to the version object that will be used for building.
                  MWCW_VERSION can be set to a string during Environment
                  construction to influence which version is chosen, otherwise
                  the latest one from MWCW_VERSIONS is used.

    Returns true if at least one version is found, false otherwise
    """
    desired = env.get('MWCW_VERSION', '')

    # return right away if the variables are already set
    if isinstance(desired, MWVersion):
        return 1
    elif desired is None:
        # MWCW_VERSION explicitly set to None disables this tool.
        return 0

    versions = find_versions()
    version = None

    if desired:
        # A string request selects the matching installed version, if any.
        for v in versions:
            if str(v) == desired:
                version = v
    elif versions:
        # No preference: default to the last (latest) detected version.
        version = versions[-1]

    env['MWCW_VERSIONS'] = versions
    env['MWCW_VERSION'] = version

    if version is None:
        return 0

    # CodeWarrior reads its configuration purely from environment
    # variables (see MWVersion); prime them from the chosen version.
    env.PrependENVPath('PATH', version.clpath)
    env.PrependENVPath('PATH', version.dllpath)
    ENV = env['ENV']
    ENV['CWFolder'] = version.path
    ENV['LM_LICENSE_FILE'] = version.license
    # NOTE(review): the '+' prefix on each MWCIncludes/MWLibraries entry
    # presumably marks a recursive search path -- confirm against
    # CodeWarrior documentation.
    plus = lambda x: '+%s' % x
    ENV['MWCIncludes'] = os.pathsep.join(map(plus, version.includes))
    ENV['MWLibraries'] = os.pathsep.join(map(plus, version.libs))
    return 1
def find_versions():
    """Return a list of MWVersion objects representing installed versions"""
    versions = []

    ### This function finds CodeWarrior by reading from the registry on
    ### Windows. Some other method needs to be implemented for other
    ### platforms, maybe something that calls env.WhereIs('mwcc')

    if SCons.Util.can_read_reg:
        try:
            HLM = SCons.Util.HKEY_LOCAL_MACHINE
            product = 'SOFTWARE\\Metrowerks\\CodeWarrior\\Product Versions'
            product_key = SCons.Util.RegOpenKeyEx(HLM, product)

            # Walk the subkeys; RegEnumKey raises RegError past the last
            # index, which exits via the outer except and ends the loop.
            i = 0
            while True:
                name = product + '\\' + SCons.Util.RegEnumKey(product_key, i)
                name_key = SCons.Util.RegOpenKeyEx(HLM, name)

                try:
                    version = SCons.Util.RegQueryValueEx(name_key, 'VERSION')
                    path = SCons.Util.RegQueryValueEx(name_key, 'PATH')
                    mwv = MWVersion(version[0], path[0], 'Win32-X86')
                    versions.append(mwv)
                except SCons.Util.RegError:
                    # Subkey lacks VERSION/PATH values; skip it.
                    pass

                i = i + 1

        except SCons.Util.RegError:
            pass

    return versions
class MWVersion(object):
    """A single installed CodeWarrior version and its important paths."""

    def __init__(self, version, path, platform):
        self.version = version
        self.path = path
        self.platform = platform
        # Command-line compiler and runtime-DLL directories under the root.
        self.clpath = os.path.join(path, 'Other Metrowerks Tools',
                                   'Command Line Tools')
        self.dllpath = os.path.join(path, 'Bin')

        # The Metrowerks tools store no configuration of their own, so they
        # cannot locate standard headers, libraries or other files by
        # themselves; everything has to be handed to them through
        # environment variables.  The attributes below determine what
        # information scons injects into the environment.

        ### These paths give a normal build environment in CodeWarrior for
        ### Windows; other CodeWarrior releases may need different paths.

        self.license = os.path.join(path, 'license.dat')
        roots = [os.path.join(path, 'MSL'),
                 os.path.join(path, '%s Support' % platform)]
        self.includes = list(roots)
        self.libs = list(roots)

    def __str__(self):
        return self.version
# Source suffixes routed to the C and C++ compile actions respectively
# (note that '.C' is handled by the C action here).
CSuffixes = ['.c', '.C']
CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++']
def generate(env):
    """Add Builders and construction variables for the mwcc to an Environment."""
    import SCons.Defaults
    import SCons.Tool

    # Detect the installed CodeWarrior version and prime env's ENV vars.
    set_vars(env)

    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)

    # Register compile actions for every recognized C and C++ suffix.
    for suffix in CSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.CAction)
        shared_obj.add_action(suffix, SCons.Defaults.ShCAction)

    for suffix in CXXSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.CXXAction)
        shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)

    # Common tail shared by every compile command line below.
    env['CCCOMFLAGS'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -nolink -o $TARGET $SOURCES'
    env['CC'] = 'mwcc'
    env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CCCOMFLAGS'
    env['CXX'] = 'mwcc'
    env['CXXCOM'] = '$CXX $CXXFLAGS $CCCOMFLAGS'
    # Shared-object builds reuse the same compiler and flag sets.
    env['SHCC'] = '$CC'
    env['SHCCFLAGS'] = '$CCFLAGS'
    env['SHCFLAGS'] = '$CFLAGS'
    env['SHCCCOM'] = '$SHCC $SHCFLAGS $SHCCFLAGS $CCCOMFLAGS'
    env['SHCXX'] = '$CXX'
    env['SHCXXFLAGS'] = '$CXXFLAGS'
    env['SHCXXCOM'] = '$SHCXX $SHCXXFLAGS $CCCOMFLAGS'
    env['CFILESUFFIX'] = '.c'
    env['CXXFILESUFFIX'] = '.cpp'
    env['CPPDEFPREFIX'] = '-D'
    env['CPPDEFSUFFIX'] = ''
    env['INCPREFIX'] = '-I'
    env['INCSUFFIX'] = ''

    #env['PCH'] = ?
    #env['PCHSTOP'] = ?
def exists(env):
    """Report whether a usable CodeWarrior installation was detected.

    Delegates to set_vars(), which also primes *env* as a side effect.
    """
    found = set_vars(env)
    return found
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
Emergya/icm-openedx-educamadrid-platform-basic | common/djangoapps/course_modes/signals.py | 57 | 1560 | """
Signal handler for setting default course mode expiration dates
"""
from django.core.exceptions import ObjectDoesNotExist
from django.dispatch.dispatcher import receiver
from xmodule.modulestore.django import SignalHandler, modulestore
from .models import CourseMode, CourseModeExpirationConfig
@receiver(SignalHandler.course_published)
def _listen_for_course_publish(sender, course_key, **kwargs):  # pylint: disable=unused-argument
    """
    Catches the signal that a course has been published in Studio and
    sets the verified mode dates to defaults.
    """
    try:
        verified_mode = CourseMode.objects.get(course_id=course_key, mode_slug=CourseMode.VERIFIED)
        # Only touch modes whose expiration was not explicitly set by a human.
        if _should_update_date(verified_mode):
            course = modulestore().get_course(course_key)
            if not course:
                return None
            # Default the deadline to a configured window before course end.
            # NOTE(review): course.end may be None for open-ended courses,
            # which would raise TypeError here -- confirm upstream guarantees.
            verification_window = CourseModeExpirationConfig.current().verification_window
            new_expiration_datetime = course.end - verification_window
            if verified_mode.expiration_datetime != new_expiration_datetime:
                # Set the expiration_datetime without triggering the explicit flag
                verified_mode._expiration_datetime = new_expiration_datetime  # pylint: disable=protected-access
                verified_mode.save()
    except ObjectDoesNotExist:
        # No verified mode exists for this course; nothing to update.
        pass
def _should_update_date(verified_mode):
""" Returns whether or not the verified mode should be updated. """
return not(verified_mode is None or verified_mode.expiration_datetime_is_explicit)
| agpl-3.0 |
jlcarmic/producthunt_simulator | venv/lib/python2.7/site-packages/scipy/linalg/lapack.py | 6 | 7986 | """
Low-level LAPACK functions (:mod:`scipy.linalg.lapack`)
=======================================================
This module contains low-level functions from the LAPACK library.
The `*gegv` family of routines have been removed from LAPACK 3.6.0
and have been deprecated in SciPy 0.17.0. They will be removed in
a future release.
.. versionadded:: 0.12.0
.. warning::
These functions do little to no error checking.
It is possible to cause crashes by mis-using them,
so prefer using the higher-level routines in `scipy.linalg`.
Finding functions
-----------------
.. autosummary::
get_lapack_funcs
All functions
-------------
.. autosummary::
:toctree: generated/
sgbsv
dgbsv
cgbsv
zgbsv
sgbtrf
dgbtrf
cgbtrf
zgbtrf
sgbtrs
dgbtrs
cgbtrs
zgbtrs
sgebal
dgebal
cgebal
zgebal
sgees
dgees
cgees
zgees
sgeev
dgeev
cgeev
zgeev
sgeev_lwork
dgeev_lwork
cgeev_lwork
zgeev_lwork
sgegv
dgegv
cgegv
zgegv
sgehrd
dgehrd
cgehrd
zgehrd
sgehrd_lwork
dgehrd_lwork
cgehrd_lwork
zgehrd_lwork
sgelss
dgelss
cgelss
zgelss
sgelss_lwork
dgelss_lwork
cgelss_lwork
zgelss_lwork
sgelsd
dgelsd
cgelsd
zgelsd
sgelsd_lwork
dgelsd_lwork
cgelsd_lwork
zgelsd_lwork
sgelsy
dgelsy
cgelsy
zgelsy
sgelsy_lwork
dgelsy_lwork
cgelsy_lwork
zgelsy_lwork
sgeqp3
dgeqp3
cgeqp3
zgeqp3
sgeqrf
dgeqrf
cgeqrf
zgeqrf
sgerqf
dgerqf
cgerqf
zgerqf
sgesdd
dgesdd
cgesdd
zgesdd
sgesdd_lwork
dgesdd_lwork
cgesdd_lwork
zgesdd_lwork
sgesv
dgesv
cgesv
zgesv
sgetrf
dgetrf
cgetrf
zgetrf
sgetri
dgetri
cgetri
zgetri
sgetri_lwork
dgetri_lwork
cgetri_lwork
zgetri_lwork
sgetrs
dgetrs
cgetrs
zgetrs
sgges
dgges
cgges
zgges
sggev
dggev
cggev
zggev
chbevd
zhbevd
chbevx
zhbevx
cheev
zheev
cheevd
zheevd
cheevr
zheevr
chegv
zhegv
chegvd
zhegvd
chegvx
zhegvx
slarf
dlarf
clarf
zlarf
slarfg
dlarfg
clarfg
zlarfg
slartg
dlartg
clartg
zlartg
slasd4
dlasd4
slaswp
dlaswp
claswp
zlaswp
slauum
dlauum
clauum
zlauum
spbsv
dpbsv
cpbsv
zpbsv
spbtrf
dpbtrf
cpbtrf
zpbtrf
spbtrs
dpbtrs
cpbtrs
zpbtrs
sposv
dposv
cposv
zposv
spotrf
dpotrf
cpotrf
zpotrf
spotri
dpotri
cpotri
zpotri
spotrs
dpotrs
cpotrs
zpotrs
crot
zrot
strsyl
dtrsyl
ctrsyl
ztrsyl
strtri
dtrtri
ctrtri
ztrtri
strtrs
dtrtrs
ctrtrs
ztrtrs
cunghr
zunghr
cungqr
zungqr
cungrq
zungrq
cunmqr
zunmqr
sgtsv
dgtsv
cgtsv
zgtsv
sptsv
dptsv
cptsv
zptsv
slamch
dlamch
sorghr
dorghr
sorgqr
dorgqr
sorgrq
dorgrq
sormqr
dormqr
ssbev
dsbev
ssbevd
dsbevd
ssbevx
dsbevx
ssyev
dsyev
ssyevd
dsyevd
ssyevr
dsyevr
ssygv
dsygv
ssygvd
dsygvd
ssygvx
dsygvx
slange
dlange
clange
zlange
"""
#
# Author: Pearu Peterson, March 2002
#
from __future__ import division, print_function, absolute_import
__all__ = ['get_lapack_funcs']
import numpy as _np
from .blas import _get_funcs
# Backward compatibility:
from .blas import find_best_blas_type as find_best_lapack_type
from scipy.linalg import _flapack
try:
from scipy.linalg import _clapack
except ImportError:
_clapack = None
# Backward compatibility
from scipy._lib._util import DeprecatedImport as _DeprecatedImport
clapack = _DeprecatedImport("scipy.linalg.blas.clapack", "scipy.linalg.lapack")
flapack = _DeprecatedImport("scipy.linalg.blas.flapack", "scipy.linalg.lapack")
# Expose all functions (only flapack --- clapack is an implementation detail)
empty_module = None
from scipy.linalg._flapack import *
del empty_module
_dep_message = """The `*gegv` family of routines has been deprecated in
LAPACK 3.6.0 in favor of the `*ggev` family of routines.
The corresponding wrappers will be removed from SciPy in
a future release."""

# Wrap the deprecated *gegv routines so importing them emits a warning.
cgegv = _np.deprecate(cgegv, old_name='cgegv', message=_dep_message)
dgegv = _np.deprecate(dgegv, old_name='dgegv', message=_dep_message)
sgegv = _np.deprecate(sgegv, old_name='sgegv', message=_dep_message)
zgegv = _np.deprecate(zgegv, old_name='zgegv', message=_dep_message)

# Modify _flapack in this scope so the deprecation warnings apply to
# functions returned by get_lapack_funcs.
_flapack.cgegv = cgegv
_flapack.dgegv = dgegv
_flapack.sgegv = sgegv
_flapack.zgegv = zgegv

# some convenience alias for complex functions: map the (nonexistent)
# "or*" real-style names onto the complex "un*" LAPACK routines.
_lapack_alias = {
    'corghr': 'cunghr', 'zorghr': 'zunghr',
    'corghr_lwork': 'cunghr_lwork', 'zorghr_lwork': 'zunghr_lwork',
    'corgqr': 'cungqr', 'zorgqr': 'zungqr',
    'cormqr': 'cunmqr', 'zormqr': 'zunmqr',
    'corgrq': 'cungrq', 'zorgrq': 'zungrq',
}
def get_lapack_funcs(names, arrays=(), dtype=None):
    """Return available LAPACK function objects from names.

    Arrays are used to determine the optimal prefix of LAPACK routines.

    Parameters
    ----------
    names : str or sequence of str
        Name(s) of LAPACK functions without type prefix.

    arrays : sequence of ndarrays, optional
        Arrays can be given to determine optimal prefix of LAPACK
        routines. If not given, double-precision routines will be
        used, otherwise the most generic type in arrays will be used.

    dtype : str or dtype, optional
        Data-type specifier. Not used if `arrays` is non-empty.

    Returns
    -------
    funcs : list
        List containing the found function(s).

    Notes
    -----
    This routine automatically chooses between Fortran/C
    interfaces. Fortran code is used whenever possible for arrays with
    column major order. In all other cases, C code is preferred.

    In LAPACK, the naming convention is that all functions start with a
    type prefix, which depends on the type of the principal
    matrix. These can be one of {'s', 'd', 'c', 'z'} for the numpy
    types {float32, float64, complex64, complex128} respectively, and
    are stored in attribute `typecode` of the returned functions.
    """
    # Shared machinery with scipy.linalg.blas; _lapack_alias maps complex
    # "or*" spellings onto the "un*" routines.
    return _get_funcs(names, arrays, dtype,
                      "LAPACK", _flapack, _clapack,
                      "flapack", "clapack", _lapack_alias)
def _compute_lwork(routine, *args, **kwargs):
"""
Round floating-point lwork returned by lapack to integer.
Several LAPACK routines compute optimal values for LWORK, which
they return in a floating-point variable. However, for large
values of LWORK, single-precision floating point is not sufficient
to hold the exact value --- some LAPACK versions (<= 3.5.0 at
least) truncate the returned integer to single precision and in
some cases this can be smaller than the required value.
"""
wi = routine(*args, **kwargs)
if len(wi) < 2:
raise ValueError('')
info = wi[-1]
if info != 0:
raise ValueError("Internal work array size computation failed: "
"%d" % (info,))
lwork = [w.real for w in wi[:-1]]
dtype = getattr(routine, 'dtype', None)
if dtype == _np.float32 or dtype == _np.complex64:
# Single-precision routine -- take next fp value to work
# around possible truncation in LAPACK code
lwork = _np.nextafter(lwork, _np.inf, dtype=_np.float32)
lwork = _np.array(lwork, _np.int64)
if _np.any(_np.logical_or(lwork < 0, lwork > _np.iinfo(_np.int32).max)):
raise ValueError("Too large work array required -- computation cannot "
"be performed with standard 32-bit LAPACK.")
lwork = lwork.astype(_np.int32)
if lwork.size == 1:
return lwork[0]
return lwork
| mit |
brechtm/rinohtype | src/rinoh/backend/pdf/xobject/purepng.py | 1 | 114427 | #!/usr/bin/env python
# encoding=utf-8
#
# png.py - PNG encoder/decoder in pure Python
#
# Copyright (C) 2015 Pavel Zlatovratskii <scondo@mail.ru>
# Copyright (C) 2006 Johann C. Rocholl <johann@browsershots.org>
# Portions Copyright (C) 2009 David Jones <drj@pobox.com>
# And probably portions Copyright (C) 2006 Nicko van Someren <nicko@nicko.org>
#
# Original concept by Johann C. Rocholl.
#
# LICENCE (MIT)
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Pure Python PNG Reader/Writer
This Python module implements support for PNG images (see PNG
specification at http://www.w3.org/TR/2003/REC-PNG-20031110/ ). It reads
and writes PNG files with all allowable bit depths
(1/2/4/8/16/24/32/48/64 bits per pixel) and colour combinations:
greyscale (1/2/4/8/16 bit); RGB, RGBA, LA (greyscale with alpha) with
8/16 bits per channel; colour mapped images (1/2/4/8 bit).
Adam7 interlacing is supported for reading and
writing. A number of optional chunks can be specified (when writing)
and understood (when reading): ``tRNS``, ``bKGD``, ``gAMA``.
For help, type ``import png; help(png)`` in your python interpreter.
A good place to start is the :class:`Reader` and :class:`Writer`
classes.
Requires Python 2.3. Best with Python 2.6 and higher. Installation is
trivial, but see the ``README.txt`` file (with the source distribution)
for details.
A note on spelling and terminology
----------------------------------
Generally British English spelling is used in the documentation. So
that's "greyscale" and "colour". This not only matches the author's
native language, it's also used by the PNG specification.
The major colour models supported by PNG (and hence by this module) are:
greyscale, RGB, greyscale--alpha, RGB--alpha. These are sometimes
referred to using the abbreviations: L, RGB, LA, RGBA. In this case
each letter abbreviates a single channel: *L* is for Luminance or Luma
or Lightness which is the channel used in greyscale images; *R*, *G*,
*B* stand for Red, Green, Blue, the components of a colour image; *A*
stands for Alpha, the opacity channel (used for transparency effects,
but higher values are more opaque, so it makes sense to call it
opacity).
A note on formats
-----------------
When getting pixel data out of this module (reading) and presenting
data to this module (writing) there are a number of ways the data could
be represented as a Python value. Generally this module uses one of
three formats called "flat row flat pixel", "boxed row flat pixel", and
"boxed row boxed pixel". Basically the concern is whether each pixel
and each row comes in its own little tuple (box), or not.
Consider an image that is 3 pixels wide by 2 pixels high, and each pixel
has RGB components:
Boxed row flat pixel::
iter([R,G,B, R,G,B, R,G,B],
[R,G,B, R,G,B, R,G,B])
Each row appears as its own sequence, but the pixels are flattened so
that three values for one pixel simply follow the three values for
the previous pixel. This is the most common format used, because it
provides a good compromise between space and convenience.
Row sequence supposed to be compatible with 'buffer' protocol in
addition to standard sequence methods so 'buffer()' can be used to
get fast per-byte access.
All rows are contained in iterable or iterable-compatible container.
(use 'iter()' to ensure)
Flat row flat pixel::
[R,G,B, R,G,B, R,G,B,
R,G,B, R,G,B, R,G,B]
The entire image is one single giant sequence of colour values.
Generally an array will be used (to save space), not a list.
Boxed row boxed pixel::
list([ (R,G,B), (R,G,B), (R,G,B) ],
[ (R,G,B), (R,G,B), (R,G,B) ])
Each row appears in its own list, but each pixel also appears in its own
tuple. A serious memory burn in Python.
In all cases the top row comes first, and for each row the pixels are
ordered from left-to-right. Within a pixel the values appear in the
order, R-G-B-A (or L-A for greyscale--alpha).
There is a fourth format, mentioned because it is used internally,
is close to what lies inside a PNG file itself, and has some support
from the public API. This format is called packed. When packed,
each row is a sequence of bytes (integers from 0 to 255), just as
it is before PNG scanline filtering is applied. When the bit depth
is 8 this is essentially the same as boxed row flat pixel; when the
bit depth is less than 8, several pixels are packed into each byte;
when the bit depth is 16 (the only value more than 8 that is supported
by the PNG image format) each pixel value is decomposed into 2 bytes
(and `packed` is a misnomer). This format is used by the
:meth:`Writer.write_packed` method. It isn't usually a convenient
format, but may be just right if the source data for the PNG image
comes from something that uses a similar format (for example, 1-bit
BMPs, or another PNG file).
And now, my famous members
--------------------------
"""
from array import array
import itertools
import logging
import math
# http://www.python.org/doc/2.4.4/lib/module-operator.html
import operator
import datetime
import time
import struct
import sys
import zlib
# http://www.python.org/doc/2.4.4/lib/module-warnings.html
import warnings
try:
from functools import reduce
except ImportError:
# suppose to get there on python<2.7 where reduce is only built-in function
pass
try:
from itertools import imap as map
except ImportError:
# On Python 3 there is no imap, but map works like imap instead
pass
__version__ = "0.3.0"
__all__ = ['png_signature', 'Image', 'Reader', 'Writer',
'Error', 'FormatError', 'ChunkError',
'Filter', 'register_extra_filter',
'write_chunks', 'from_array', 'parse_mode', 'MergedPlanes',
'PERCEPTUAL', 'RELATIVE_COLORIMETRIC', 'SATURATION',
'ABSOLUTE_COLORIMETRIC']
# The PNG signature.
# http://www.w3.org/TR/PNG/#5PNG-file-signature
png_signature = struct.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10)
_adam7 = ((0, 0, 8, 8),
(4, 0, 8, 8),
(0, 4, 4, 8),
(2, 0, 4, 4),
(0, 2, 2, 4),
(1, 0, 2, 2),
(0, 1, 1, 2))
# registered keywords
# http://www.w3.org/TR/2003/REC-PNG-20031110/#11keywords
_registered_kw = ('Title', 'Author', 'Description', 'Copyright', 'Software',
'Disclaimer', 'Warning', 'Source', 'Comment',
'Creation Time')
# rendering intent
PERCEPTUAL = 0
RELATIVE_COLORIMETRIC = 1
SATURATION = 2
ABSOLUTE_COLORIMETRIC = 3
def group(s, n):
    """Repack the items of `s` into a list of `n`-tuples.

    Trailing items that do not fill a whole tuple are dropped (same
    semantics as ``zip`` over `n` copies of a single iterator).
    """
    source = iter(s)
    return list(zip(*(source for _ in range(n))))
def _rel_import(module, tgt):
    """Using relative import in both Python 2 and Python 3"""
    # exec/eval are only ever fed module/attribute names supplied by this
    # file itself, never external input.
    try:
        exec("from ." + module + " import " + tgt, globals(), locals())
    except SyntaxError:
        # On Python < 2.5 relative import cause syntax error
        exec("from " + module + " import " + tgt, globals(), locals())
    except (ValueError, SystemError):
        # relative import in non-package, try absolute
        exec("from " + module + " import " + tgt, globals(), locals())
    # The exec above bound `tgt` in locals(); eval fetches that binding
    # back so callers receive the imported object.
    return eval(tgt)
try:
    next
except NameError:
    # Python < 2.6: the next() builtin does not exist yet.
    def next(it):
        """trivial `next` emulation"""
        return it.next()

try:
    bytes
except NameError:
    # Python < 2.6: fall back to str, which is the byte type there.
    bytes = str
# Define a bytearray_to_bytes() function.
# The definition of this function changes according to what
# version of Python we are on.
def bytearray_to_bytes(src):
    """Default version"""
    # On modern Pythons bytes(bytearray) converts directly; an
    # array-based fallback below replaces this on old interpreters.
    return bytes(src)
def newHarray(length=0):
    """Return an unsigned-short ('H') array holding `length` zeros."""
    single_zero = array('H', [0])
    return single_zero * length
# bytearray is faster than array('B'), so we prefer to use it
# where available.
try:
    # bytearray exists (>= Python 2.6).
    newBarray = bytearray
    copyBarray = bytearray
except NameError:
    # bytearray does not exist. We're probably < Python 2.6 (the
    # version in which bytearray appears).
    def bytearray(src=tuple()):
        """Bytearray-like array"""
        return array('B', src)

    def newBarray(length=0):
        """fast init by length"""
        return array('B', [0]) * length

    if hasattr(array, '__copy__'):
        # a bit faster if possible
        copyBarray = array.__copy__
    else:
        copyBarray = bytearray

    def bytearray_to_bytes(row):
        """
        Convert bytearray to bytes.

        Recall that `row` will actually be an ``array``.
        """
        return row.tostring()
try:
    from itertools import tee
except ImportError:
    # Pre-2.4 fallback implementation of itertools.tee.
    def tee(iterable, n=2):
        """Return n independent iterators from a single iterable."""
        it = iter(iterable)
        deques = [list() for _ in range(n)]

        def gen(mydeque):
            # Serve items from the shared source, buffering each fetched
            # value into every consumer's deque.
            # NOTE(review): next(it) leaking StopIteration out of a
            # generator would be a RuntimeError under PEP 479, but this
            # fallback only runs on old Pythons lacking itertools.tee.
            while True:
                if not mydeque:          # when the local deque is empty
                    newval = next(it)    # fetch a new value and
                    for d in deques:     # load it to all the deques
                        d.append(newval)
                yield mydeque.pop(0)
        return tuple(map(gen, deques))
# Python 3 workaround
try:
    basestring
except NameError:
    basestring = str

# Conditionally convert to bytes. Works on Python 2 and Python 3.
try:
    bytes('', 'ascii')

    def strtobytes(x): return bytes(x, 'iso8859-1')  # noqa

    def bytestostr(x): return str(x, 'iso8859-1')  # noqa
except (NameError, TypeError):
    # We get NameError when bytes() does not exist (most Python
    # 2.x versions), and TypeError when bytes() exists but is on
    # Python 2.x (when it is an alias for str() and takes at most
    # one argument).
    strtobytes = str
    bytestostr = str

# A single zero byte, usable on both Python 2 and Python 3.
zerobyte = strtobytes(chr(0))

try:
    set
except NameError:
    # Python < 2.4: use the sets module instead of the builtin.
    from sets import Set as set
def peekiter(iterable):
    """Return first row and also iterable with same items as original.

    Raises StopIteration (from ``next``) if `iterable` is empty, same
    as before.
    """
    it = iter(iterable)
    one = next(it)

    def gen():
        """Generator yielding the first item and then the rest."""
        yield one
        # Bug fix: the old ``while True: yield next(it)`` loop let
        # StopIteration escape from inside a generator, which PEP 479
        # (Python 3.7+) converts into a RuntimeError.  Plain iteration
        # terminates cleanly instead.
        for item in it:
            yield item
    return (one, gen())
def check_palette(palette):
    """
    Validate a palette argument (to the :class:`Writer` class).

    Return the palette as a list when valid; raise ValueError otherwise.
    """
    # None is the default and is always acceptable.
    if palette is None:
        return None
    entries = list(palette)
    if not (0 < len(entries) <= 256):
        raise ValueError("a palette must have between 1 and 256 entries")
    seen_triple = False
    for idx, entry in enumerate(entries):
        if len(entry) not in (3, 4):
            raise ValueError(
                "palette entry %d: entries must be 3- or 4-tuples." % idx)
        if len(entry) == 3:
            seen_triple = True
        elif seen_triple:
            # A 4-tuple after a 3-tuple: forbidden ordering.
            raise ValueError(
                "palette entry %d: all 4-tuples must precede all 3-tuples" % idx)
        for channel in entry:
            if int(channel) != channel or not(0 <= channel <= 255):
                raise ValueError(
                    "palette entry %d: values must be integer: 0 <= x <= 255" % idx)
    return entries
def check_sizes(size, width, height):
    """
    Check that these arguments, if supplied, are consistent.

    Return a (width, height) pair.
    """
    if not size:
        return width, height
    if len(size) != 2:
        raise ValueError(
            "size argument should be a pair (width, height)")
    # Each explicitly given dimension must agree with `size`.
    for axis, (given, label) in enumerate(zip((width, height),
                                              ('width', 'height'))):
        if given is not None and given != size[axis]:
            raise ValueError(
                "size[%d] (%r) and %s (%r) should match when both are used."
                % (axis, size[axis], label, given))
    return size
def check_color(c, greyscale, which):
    """
    Check that a colour argument has the right form.

    Return the colour (a bare integer for greyscale is "corrected" to
    a 1-tuple).  Used for the transparent and background options.
    """
    if c is None:
        return c
    if not greyscale:
        valid = (len(c) == 3 and
                 isinteger(c[0]) and
                 isinteger(c[1]) and
                 isinteger(c[2]))
        if not valid:
            raise ValueError(
                "%s colour must be a triple of integers" % which)
        return c
    # Greyscale: accept a bare number and wrap it in a singleton tuple.
    try:
        len(c)
    except TypeError:
        c = (c,)
    if len(c) != 1:
        raise ValueError("%s for greyscale must be 1-tuple" %
                         which)
    if not isinteger(c[0]):
        raise ValueError(
            "%s colour for greyscale must be integer" % which)
    return c
def check_time(value):
    """Convert time from most popular representations to a struct_time.

    Accepts ``None`` (returned unchanged), ``time.struct_time`` or a
    plain tuple (returned unchanged), ``datetime`` date/time/datetime
    objects, an integer POSIX timestamp, or the string ``'now'``.
    Raises ValueError for anything else.
    """
    if value is None:
        return None
    if isinstance(value, (time.struct_time, tuple)):
        return value
    if isinstance(value, datetime.datetime):
        return value.timetuple()
    if isinstance(value, datetime.date):
        # Combine the given date with the current UTC time of day.
        # Bug fix: datetime objects are immutable, so replace() returns a
        # new object; the old code discarded that result and silently
        # returned the current date instead of `value`'s.
        res = datetime.datetime.utcnow()
        res = res.replace(year=value.year, month=value.month, day=value.day)
        return res.timetuple()
    if isinstance(value, datetime.time):
        return datetime.datetime.combine(datetime.date.today(),
                                         value).timetuple()
    if isinteger(value):
        # Handle integer as timestamp
        return time.gmtime(value)
    if isinstance(value, basestring):
        if value.lower() == 'now':
            return time.gmtime()
        # TODO: parsing some popular strings
    raise ValueError("Unsupported time representation:" + repr(value))
def popdict(src, keys):
    """
    Extract all `keys` (with values) from the `src` dictionary.

    Extracted pairs are removed from `src` and returned as a new
    dictionary; keys absent from `src` are ignored.
    """
    extracted = {}
    for key in keys:
        try:
            extracted[key] = src.pop(key)
        except KeyError:
            pass
    return extracted
def try_greyscale(pixels, alpha=False, dirty_alpha=True):
    """
    Check if flatboxed RGB `pixels` could be converted to greyscale.

    If they can, return the greyscale rows (with the alpha plane
    re-attached via :class:`MergedPlanes` when `alpha` is true),
    otherwise return the `False` constant.

    A row is convertible when its red, green and blue planes are all
    equal; the green plane is then used as the grey value.
    `dirty_alpha` is accepted for interface compatibility but unused
    here.
    """
    # Channels per pixel: RGB (3) plus optional alpha.
    planes = 3 + bool(alpha)
    grey_rows = list()
    alpha_rows = list()
    for row in pixels:
        green = row[1::planes]
        if alpha:
            # Bug fix: collect the whole alpha plane (samples at offset 3
            # of every pixel).  The old slice ``row[4:planes]`` equalled
            # ``row[4:4]`` and was always empty, dropping alpha entirely.
            alpha_rows.append(row[3::planes])
        if green != row[0::planes] or green != row[2::planes]:
            # Red or blue differs from green somewhere: not grey.
            return False
        grey_rows.append(green)
    if alpha:
        return MergedPlanes(grey_rows, 1, alpha_rows, 1)
    return grey_rows
class Error(Exception):
    """Generic PurePNG error"""

    def __str__(self):
        # Render as "<ExceptionClassName>: <space-joined args>".
        detail = ' '.join(self.args)
        return '%s: %s' % (self.__class__.__name__, detail)
class FormatError(Error):
    """
    Problem with input file format.

    In other words, PNG file does
    not conform to the specification in some way and is invalid.
    """
class ChunkError(FormatError):
    """Error in chunk handling (a FormatError specific to PNG chunks)."""
class BaseFilter(object):
    """
    Basic methods of filtering and other byte manipulations

    This part can be compile with Cython (see README.cython)
    Private methods are declared as 'cdef' (unavailable from python)
    for this compilation, so don't just rename it.
    """

    def __init__(self, bitdepth=8):
        # `fu` is the filter unit: byte distance between a sample and the
        # corresponding sample of the previous pixel.
        if bitdepth > 8:
            self.fu = bitdepth // 8
        else:
            self.fu = 1
        # Previous (unfiltered) scanline; None until the first line of a
        # pass has been processed.
        # Bug fix: `prev` was never initialised, so the first call to
        # undo_filter()/_filter_scanline() raised AttributeError instead
        # of taking the documented first-line branch below.
        self.prev = None

    def __undo_filter_sub(self, scanline):
        """Undo sub filter."""
        ai = 0
        # Loop starts at index fu: the first filter unit has no left
        # neighbour and is stored unfiltered.
        for i in range(self.fu, len(scanline)):
            x = scanline[i]
            a = scanline[ai]  # already-reconstructed left neighbour
            scanline[i] = (x + a) & 0xff
            ai += 1

    def __do_filter_sub(self, scanline, result):
        """Sub filter."""
        ai = 0
        for i in range(self.fu, len(result)):
            x = scanline[i]
            a = scanline[ai]
            result[i] = (x - a) & 0xff
            ai += 1

    def __undo_filter_up(self, scanline):
        """Undo up filter."""
        previous = self.prev
        for i in range(len(scanline)):
            x = scanline[i]
            b = previous[i]
            scanline[i] = (x + b) & 0xff

    def __do_filter_up(self, scanline, result):
        """Up filter."""
        previous = self.prev
        for i in range(len(result)):
            x = scanline[i]
            b = previous[i]
            result[i] = (x - b) & 0xff

    def __undo_filter_average(self, scanline):
        """Undo average filter."""
        ai = -self.fu
        previous = self.prev
        for i in range(len(scanline)):
            x = scanline[i]
            if ai < 0:
                # No pixel to the left of the first filter unit.
                a = 0
            else:
                a = scanline[ai]
            b = previous[i]
            scanline[i] = (x + ((a + b) >> 1)) & 0xff
            ai += 1

    def __do_filter_average(self, scanline, result):
        """Average filter."""
        ai = -self.fu
        previous = self.prev
        for i in range(len(result)):
            x = scanline[i]
            if ai < 0:
                a = 0
            else:
                a = scanline[ai]
            b = previous[i]
            result[i] = (x - ((a + b) >> 1)) & 0xff
            ai += 1

    def __undo_filter_paeth(self, scanline):
        """Undo Paeth filter."""
        ai = -self.fu
        previous = self.prev
        for i in range(len(scanline)):
            x = scanline[i]
            if ai < 0:
                # a = c = 0 at the left edge: the Paeth predictor
                # degenerates to the value above (b).
                pr = previous[i]
            else:
                a = scanline[ai]
                c = previous[ai]
                b = previous[i]
                pa = abs(b - c)
                pb = abs(a - c)
                pc = abs(a + b - c - c)
                if pa <= pb and pa <= pc:
                    pr = a
                elif pb <= pc:
                    pr = b
                else:
                    pr = c
            scanline[i] = (x + pr) & 0xff
            ai += 1

    def __do_filter_paeth(self, scanline, result):
        """Paeth filter."""
        # http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth
        ai = -self.fu
        previous = self.prev
        for i in range(len(result)):
            x = scanline[i]
            if ai < 0:
                pr = previous[i]  # a = c = 0 at the left edge
            else:
                a = scanline[ai]
                c = previous[ai]
                b = previous[i]
                pa = abs(b - c)
                pb = abs(a - c)
                pc = abs(a + b - c - c)
                if pa <= pb and pa <= pc:
                    pr = a
                elif pb <= pc:
                    pr = b
                else:
                    pr = c
            result[i] = (x - pr) & 0xff
            ai += 1

    def undo_filter(self, filter_type, line):
        """
        Undo the filter for a scanline.

        `line` is a sequence of bytes that does not include
        the initial filter type byte.

        The scanline will have the effects of filtering removed.
        Scanline modified inplace and also returned as result.
        """
        assert 0 <= filter_type <= 4
        # For the first line of a pass, synthesize a dummy previous line.
        if self.prev is None:
            self.prev = newBarray(len(line))
            # Also it's possible to switch some filters to easier:
            # over an all-zero previous line "up" is a no-op and
            # "paeth" reduces to "sub".
            if filter_type == 2:  # "up"
                filter_type = 0
            elif filter_type == 4:  # "paeth"
                filter_type = 1
        # Call appropriate filter algorithm.
        # 0 - do nothing
        if filter_type == 1:
            self.__undo_filter_sub(line)
        elif filter_type == 2:
            self.__undo_filter_up(line)
        elif filter_type == 3:
            self.__undo_filter_average(line)
        elif filter_type == 4:
            self.__undo_filter_paeth(line)
        # This will not work writing cython attributes from python
        # Only 'cython from cython' or 'python from python'
        self.prev[:] = line[:]
        return line

    def _filter_scanline(self, filter_type, line, result):
        """
        Apply a scanline filter to a scanline.

        `filter_type` specifies the filter type (0 to 4)

        `line` specifies the current (unfiltered) scanline as a sequence
        of bytes; the filtered bytes are written into `result`.
        The caller is responsible for maintaining ``self.prev``.
        """
        assert 0 <= filter_type < 5
        if self.prev is None:
            # We're on the first line. Some of the filters can be reduced
            # to simpler cases which makes handling the line "off the top"
            # of the image simpler. "up" becomes "none"; "paeth" becomes
            # "left" (non-trivial, but true). "average" needs to be handled
            # specially.
            if filter_type == 2:  # "up"
                filter_type = 0
            elif filter_type == 3:
                self.prev = newBarray(len(line))
            elif filter_type == 4:  # "paeth"
                filter_type = 1
        if filter_type == 1:
            self.__do_filter_sub(line, result)
        elif filter_type == 2:
            self.__do_filter_up(line, result)
        elif filter_type == 3:
            self.__do_filter_average(line, result)
        elif filter_type == 4:
            self.__do_filter_paeth(line, result)

    # Todo: color conversion functions should be moved
    # to a separate part in future
    def convert_la_to_rgba(self, row, result):
        """Convert a grayscale image with alpha to RGBA."""
        # Each pixel is an (L, A) pair, so the pixel count is half the
        # row length.  (Bug fix: was ``len(row) // 3``, which left the
        # trailing third of every row unconverted.)
        for i in range(len(row) // 2):
            for j in range(3):
                result[(4 * i) + j] = row[2 * i]
            result[(4 * i) + 3] = row[(2 * i) + 1]

    def convert_l_to_rgba(self, row, result):
        """
        Convert a grayscale image to RGBA.

        This method assumes the alpha channel in result is already
        correctly initialized.
        """
        # One sample per pixel.  (Bug fix: was ``len(row) // 3``, which
        # only converted the first third of the row.)
        for i in range(len(row)):
            for j in range(3):
                result[(4 * i) + j] = row[i]

    def convert_rgb_to_rgba(self, row, result):
        """
        Convert an RGB image to RGBA.

        This method assumes the alpha channel in result is already
        correctly initialized.
        """
        for i in range(len(row) // 3):
            for j in range(3):
                result[(4 * i) + j] = row[(3 * i) + j]
class Writer(object):
"""PNG encoder in pure Python."""
    def __init__(self, width=None, height=None,
                 greyscale=False,
                 alpha=False,
                 bitdepth=8,
                 palette=None,
                 transparent=None,
                 background=None,
                 gamma=None,
                 compression=None,
                 interlace=False,
                 chunk_limit=2 ** 20,
                 icc_profile=None,
                 **kwargs
                 ):
        """
        Create a PNG encoder object.

        Arguments:

        width, height
          Image size in pixels, as two separate arguments.
        greyscale
          Input data is greyscale, not RGB.
        alpha
          Input data has alpha channel (RGBA or LA).
        bitdepth
          Bit depth: from 1 to 16.
        palette
          Create a palette for a colour mapped image (colour type 3).
        transparent
          Specify a transparent colour (create a ``tRNS`` chunk).
        background
          Specify a default background colour (create a ``bKGD`` chunk).
        gamma
          Specify a gamma value (create a ``gAMA`` chunk).
        compression
          zlib compression level: 0 (none) to 9 (more compressed);
          default: -1 or None.
        interlace
          Create an interlaced image.
        chunk_limit
          Write multiple ``IDAT`` chunks to save memory.
        icc_profile
          tuple of (`name`, `databytes`) or just data bytes
          to write ICC Profile

        Extra keywords:

        text
          see :meth:`set_text`
        modification_time
          see :meth:`set_modification_time`
        resolution
          see :meth:`set_resolution`
        filter_type
          Enable and specify PNG filter
          see :meth:`set_filter_type`

        The image size (in pixels) can be specified either by using the
        `width` and `height` arguments, or with the single `size`
        argument. If `size` is used it should be a pair (*width*,
        *height*).

        `greyscale` and `alpha` are booleans that specify whether
        an image is greyscale (or colour), and whether it has an
        alpha channel (or not).

        `bitdepth` specifies the bit depth of the source pixel values.
        Each source pixel value must be an integer between 0 and
        ``2**bitdepth-1``. For example, 8-bit images have values
        between 0 and 255. PNG only stores images with bit depths of
        1,2,4,8, or 16. When `bitdepth` is not one of these values,
        the next highest valid bit depth is selected, and an ``sBIT``
        (significant bits) chunk is generated that specifies the
        original precision of the source image. In this case the
        supplied pixel values will be rescaled to fit the range of
        the selected bit depth.

        The details of which bit depth / colour model combinations the
        PNG file format supports directly, are somewhat arcane
        (refer to the PNG specification for full details). Briefly:
        "small" bit depths (1,2,4) are only allowed with greyscale and
        colour mapped images; colour mapped images cannot have bit depth
        16.

        For colour mapped images (in other words, when the `palette`
        argument is specified) the `bitdepth` argument must match one of
        the valid PNG bit depths: 1, 2, 4, or 8. (It is valid to have a
        PNG image with a palette and an ``sBIT`` chunk, but the meaning
        is slightly different; it would be awkward to press the
        `bitdepth` argument into service for this.)

        The `palette` option, when specified, causes a colour mapped image
        to be created: the PNG colour type is set to 3; `greyscale` must not
        be set; `alpha` must not be set; `transparent` must not be set;
        the bit depth must be 1, 2, 4, or 8.

        When a colour mapped image is created, the pixel values
        are palette indexes and the `bitdepth` argument specifies the size
        of these indexes (not the size of the colour values in the palette).

        The palette argument value should be a sequence of 3- or
        4-tuples. 3-tuples specify RGB palette entries; 4-tuples
        specify RGBA palette entries. If both 4-tuples and 3-tuples
        appear in the sequence then all the 4-tuples must come
        before all the 3-tuples. A ``PLTE`` chunk is created; if there
        are 4-tuples then a ``tRNS`` chunk is created as well. The
        ``PLTE`` chunk will contain all the RGB triples in the same
        sequence; the ``tRNS`` chunk will contain the alpha channel for
        all the 4-tuples, in the same sequence. Palette entries
        are always 8-bit.

        If specified, the `transparent` and `background` parameters must
        be a tuple with three integer values for red, green, blue, or
        a simple integer (or singleton tuple) for a greyscale image.

        If specified, the `gamma` parameter must be a positive number
        (generally, a `float`). A ``gAMA`` chunk will be created.
        Note that this will not change the values of the pixels as
        they appear in the PNG file, they are assumed to have already
        been converted appropriately for the gamma specified.

        The `compression` argument specifies the compression level to
        be used by the ``zlib`` module. Values from 1 to 9 specify
        compression, with 9 being "more compressed" (usually smaller
        and slower, but it doesn't always work out that way). 0 means
        no compression. -1 and ``None`` both mean that the default
        level of compression will be picked by the ``zlib`` module
        (which is generally acceptable).

        If `interlace` is true then an interlaced image is created
        (using PNG's so far only interlace method, *Adam7*). This does
        not affect how the pixels should be presented to the encoder,
        rather it changes how they are arranged into the PNG file.
        On slow connexions interlaced images can be partially decoded
        by the browser to give a rough view of the image that is
        successively refined as more image data appears.

        .. note ::

          Enabling the `interlace` option requires the entire image
          to be processed in working memory.

        `chunk_limit` is used to limit the amount of memory used whilst
        compressing the image. In order to avoid using large amounts of
        memory, multiple ``IDAT`` chunks may be created.
        """
        width, height = check_sizes(kwargs.pop('size', None),
                                    width, height)
        if width <= 0 or height <= 0:
            raise ValueError("width and height must be greater than zero")
        if not isinteger(width) or not isinteger(height):
            raise ValueError("width and height must be integers")
        # http://www.w3.org/TR/PNG/#7Integers-and-byte-order
        if width > 2**32-1 or height > 2**32-1:
            raise ValueError("width and height cannot exceed 2**32-1")
        if alpha and transparent is not None:
            raise ValueError(
                "transparent colour not allowed with alpha channel")
        # Deprecated bytes_per_sample keyword (pre-bitdepth API).
        if 'bytes_per_sample' in kwargs and not bitdepth:
            warnings.warn('please use bitdepth instead of bytes_per_sample',
                          DeprecationWarning)
            if kwargs['bytes_per_sample'] not in (0.125, 0.25, 0.5, 1, 2):
                raise ValueError(
                    "bytes per sample must be .125, .25, .5, 1, or 2")
            bitdepth = int(8 * kwargs.pop('bytes_per_sample'))
        # Deprecated 'physical' keyword, renamed to 'resolution'.
        # (The 'physilcal' typo below is part of the emitted warning text.)
        if 'resolution' not in kwargs and 'physical' in kwargs:
            kwargs['resolution'] = kwargs.pop('physical')
            warnings.warn('please use resolution instead of physilcal',
                          DeprecationWarning)
        if not isinteger(bitdepth) or bitdepth < 1 or 16 < bitdepth:
            raise ValueError("bitdepth (%r) must be a postive integer <= 16" %
                             bitdepth)
        self.pixbitdepth = bitdepth
        self.palette = check_palette(palette)
        if self.palette:
            if bitdepth not in (1, 2, 4, 8):
                raise ValueError("with palette bitdepth must be 1, 2, 4, or 8")
            if transparent is not None:
                raise ValueError("transparent and palette not compatible")
            if alpha:
                raise ValueError("alpha and palette not compatible")
            if greyscale:
                if greyscale == 'try':
                    greyscale = False
                # NOTE(review): this raises even when the 'try' value was
                # just downgraded to False above, making that assignment
                # dead -- confirm against upstream whether greyscale='try'
                # should fall through instead of raising here.
                raise ValueError("greyscale and palette not compatible")
        self.transparent = check_color(transparent, greyscale, 'transparent')
        self.background = check_color(background, greyscale, 'background')
        # At the moment the `planes` argument is ignored;
        # its purpose is to act as a dummy so that
        # ``Writer(x, y, **info)`` works, where `info` is a dictionary
        # returned by Reader.read and friends.
        # Ditto for `colormap` and `maxval`.
        popdict(kwargs, ('planes', 'colormap', 'maxval'))
        # Route the remaining recognised keywords through their setters
        # (each setter accepts None for "not supplied").
        for ex_kw in ('filter_type', 'text', 'resolution', 'modification_time',
                      'rendering_intent', 'white_point', 'rgb_points'):
            getattr(self, 'set_' + ex_kw)(kwargs.pop(ex_kw, None))
        # Keyword text support
        kw_text = popdict(kwargs, _registered_kw)
        if kw_text:
            kw_text.update(self.text)
            self.set_text(kw_text)
        if kwargs:
            warnings.warn("Unknown writer args: " + str(kwargs))
        # It's important that the true boolean values (greyscale, alpha,
        # colormap, interlace) are converted to bool because Iverson's
        # convention is relied upon later on.
        self.width = width
        self.height = height
        self.gamma = gamma
        if icc_profile:
            if 'icc_profile_name' in kwargs:
                warnings.warn("Use tuple (`name`, `data`) to provide"
                              " ICC Profile name", DeprecationWarning)
                self.set_icc_profile(icc_profile, kwargs['icc_profile_name'])
            else:
                self.set_icc_profile(icc_profile)
        else:
            self.icc_profile = None
        # 'try' is kept as-is so later stages may attempt an automatic
        # RGB-to-greyscale conversion.
        if greyscale == 'try':
            self.greyscale = 'try'
        else:
            self.greyscale = bool(greyscale)
        self.alpha = bool(alpha)
        self.bitdepth = int(bitdepth)
        self.compression = compression
        self.chunk_limit = chunk_limit
        self.interlace = bool(interlace)
        if bool(self.palette) and (self.greyscale or self.alpha):
            raise FormatError("Paletted image could not be grayscale or"
                              " contain alpha plane")
        # Channel count: 1 for (true) greyscale or palette indexes,
        # 3 for RGB, plus 1 when an alpha plane is present.
        self.planes = (3, 1)[(self.greyscale and self.greyscale != 'try') or
                             bool(self.palette)] + self.alpha
def set_icc_profile(self, profile=None, name='ICC Profile'):
    """
    Add an ICC Profile to be written as an ``iCCP`` chunk.

    The preferred form is a tuple/list ``(profile_name, profile_bytes)``,
    but raw profile bytes with the name given separately via `name`
    are also supported.  Passing ``None`` (the default) clears any
    previously configured profile; previously this raised TypeError.

    Raises :class:`Error` when the profile has no name.
    """
    if profile is None:
        # Nothing to embed; clear the attribute so chunk writing
        # skips the iCCP chunk.
        self.icc_profile = None
        return
    if isinstance(profile, (basestring, bytes)):
        # Bare profile bytes: pair them with the supplied name.
        icc_profile = [name, profile]
        # TODO: more check
    else:
        icc_profile = profile
    if not icc_profile[0]:
        raise Error("ICC profile should have a name")
    elif not isinstance(icc_profile[0], bytes):
        # Chunk data must be bytes; encode the textual name.
        icc_profile[0] = strtobytes(icc_profile[0])
    self.icc_profile = icc_profile
def set_text(self, text=None, **kwargs):
    """
    Attach textual metadata passed as a dictionary.

    Every pair in the dictionary is written; keys should be latin-1.
    Registered keywords may also be supplied as keyword arguments.
    Calling this more than once overwrites existing data.
    """
    collected = {} if text is None else text
    collected.update(popdict(kwargs, _registered_kw))
    if 'Creation Time' in collected and \
            not isinstance(collected['Creation Time'], (basestring, bytes)):
        # Normalise a non-string timestamp to ISO 8601 text.
        parts = check_time(collected['Creation Time'])[:6]
        collected['Creation Time'] = datetime.datetime(*parts).isoformat()
    self.text = collected
def set_filter_type(self, filter_type=None):
    """
    Select the scanline filtering mode used before compression.

    `filter_type` is a filter number or name for better compression;
    see http://www.w3.org/TR/PNG/#9Filter-types for details.

    An adaptive per-row strategy may be used instead.  Predefined
    strategies are `sum` and `entropy`; custom ones can be added with
    :meth:`register_extra_filter` or passed directly as a callable
    (see :meth:`register_extra_filter` for details).
    """
    named_filters = {'none': 0,
                     'sub': 1,
                     'up': 2,
                     'average': 3,
                     'paeth': 4}
    if filter_type is None:
        filter_type = 0
    elif isinstance(filter_type, basestring):
        key = str(filter_type).lower()
        if key in named_filters:
            filter_type = named_filters[key]
    self.filter_type = filter_type
def set_modification_time(self, modification_time=True):
    """
    Configure the last-modification time (``tIME`` chunk).

    Passing ``True`` or the string ``'write'`` defers the timestamp
    to the moment the file is actually written; any other value is
    validated through ``check_time``.
    """
    defer_to_write = modification_time is True or (
        isinstance(modification_time, basestring) and
        modification_time.lower() == 'write')
    if defer_to_write:
        self.modification_time = True
    else:
        self.modification_time = check_time(modification_time)
def set_resolution(self, resolution=None):
    """
    Record physical pixel dimensions (``pHYs`` chunk).

    `resolution` should be a tuple of pixels-per-unit and a unit
    type; the unit type may be omitted.  Pixels-per-unit can be a
    single number or a ``(ppu_x, ppu_y)`` pair, and all three values
    may also be given flat in one tuple::

        resolution = ((1, 4), )      # wide pixels (4:1), no unit
        resolution = (300, 'inch')   # 300dpi in both dimensions
        resolution = (4, 1, 0)       # tall pixels (1:4), no unit
    """
    if resolution is None:
        self.resolution = None
        return
    if len(resolution) == 3:
        # Flat form: regroup into ((ppu_x, ppu_y), unit).
        resolution = ((resolution[0], resolution[1]), resolution[2])
    if len(resolution) == 1 or not resolution[1]:
        # Missing or falsy unit specifier becomes 0 (no unit).
        resolution = (resolution[0], 0)
    if isinstance(resolution[0], float) or isinteger(resolution[0]):
        # Single number: same density in both dimensions.
        resolution = ((resolution[0], resolution[0]), resolution[1])
    unit = resolution[1]
    if unit in (1, 'm', 'meter'):
        resolution = (resolution[0], 1)
    elif unit in ('i', 'in', 'inch'):
        # Convert to pixels per metre (1 inch = 0.0254 m), rounding.
        resolution = ((int(resolution[0][0] / 0.0254 + 0.5),
                       int(resolution[0][1] / 0.0254 + 0.5)), 1)
    elif unit in ('cm', 'centimeter'):
        resolution = ((resolution[0][0] * 100,
                       resolution[0][1] * 100), 1)
    self.resolution = resolution
def set_rendering_intent(self, rendering_intent):
    """
    Choose the rendering intent recorded in the ``sRGB`` chunk.

    Accepts ``None`` or one of the module constants ``PERCEPTUAL``,
    ``RELATIVE_COLORIMETRIC``, ``SATURATION`` and
    ``ABSOLUTE_COLORIMETRIC``; anything else raises
    :class:`FormatError`.
    """
    if rendering_intent not in (None,
                                PERCEPTUAL,
                                RELATIVE_COLORIMETRIC,
                                SATURATION,
                                ABSOLUTE_COLORIMETRIC):
        # Fixed typo in the original message ("redering").
        raise FormatError('Unknown rendering intent')
    self.rendering_intent = rendering_intent
def set_white_point(self, white_point, point2=None):
    """
    Store the white point part of the ``cHRM`` chunk.

    Accepts either a single ``(x, y)`` value or two separate floats.
    """
    if isinstance(white_point, float) and isinstance(point2, float):
        self.white_point = (white_point, point2)
    else:
        self.white_point = white_point
def set_rgb_points(self, rgb_points, *args):
    """
    Store the red/green/blue chromaticity part of the ``cHRM`` chunk.

    May be called with one ``((rx, ry), (gx, gy), (bx, by))``
    sequence, with three separate ``(x, y)`` pairs, or with six
    separate numbers.
    """
    extra = len(args)
    if extra == 0:
        self.rgb_points = rgb_points
    elif extra == 2:
        # Three separate (x, y) tuples.
        self.rgb_points = (rgb_points,) + tuple(args)
    elif extra == 5:
        # Six flat numbers: regroup into pairs.
        flat = (rgb_points,) + tuple(args)
        self.rgb_points = (flat[0:2], flat[2:4], flat[4:6])
def __write_palette(self, outfile):
    """
    Write a ``PLTE`` and, if any palette entry carries alpha, a
    ``tRNS`` chunk to `outfile`.

    This method should be called only from ``write_idat`` method
    or chunk order will be ruined.
    """
    # We must have true bitdepth within palette to use sBIT with palette
    # if self.pixbitdepth != 8:
    #     write_chunk(outfile, 'sBIT',
    #                 bytearray_to_bytes(bytearray((self.pixbitdepth,) * 3)))
    p = bytearray()  # RGB triples for PLTE
    t = bytearray()  # alpha values for tRNS, parallel to PLTE entries
    for x in self.palette:
        p.extend(x[0:3])
        if len(x) > 3:
            t.append(x[3])
    write_chunk(outfile, 'PLTE', bytearray_to_bytes(p))
    if t:
        # tRNS chunk is optional. Only needed if palette entries
        # have alpha.
        write_chunk(outfile, 'tRNS', bytearray_to_bytes(t))
def __write_srgb(self, outfile):
    """
    Write colour reference chunks: ``sRGB``, ``cHRM``, ``gAMA``
    and ``iCCP``.

    This method should be called only from ``write_idat`` method
    or chunk order will be ruined.

    Raises :class:`FormatError` when both a rendering intent and an
    ICC profile are configured (sRGB and iCCP are mutually
    exclusive).
    """
    if self.rendering_intent is not None and self.icc_profile is not None:
        # Fixed missing space between the two halves of this message.
        raise FormatError("sRGB(via rendering_intent) and iCCP could not"
                          " be present simultaneously")
    # http://www.w3.org/TR/PNG/#11sRGB
    if self.rendering_intent is not None:
        write_chunk(outfile, 'sRGB',
                    struct.pack("B", int(self.rendering_intent)))
    # http://www.w3.org/TR/PNG/#11cHRM
    if (self.white_point is None) != (self.rgb_points is None):
        # Only one of the two was given; cHRM needs both, so drop
        # them.  logging.warn is deprecated, use logging.warning.
        logging.warning("White and RGB points should be both specified to"
                        " write cHRM chunk")
        self.white_point = None
        self.rgb_points = None
    if self.white_point is not None and self.rgb_points is not None:
        data = (self.white_point[0], self.white_point[1],
                self.rgb_points[0][0], self.rgb_points[0][1],
                self.rgb_points[1][0], self.rgb_points[1][1],
                self.rgb_points[2][0], self.rgb_points[2][1],
                )
        # cHRM stores each value as an integer scaled by 100000.
        write_chunk(outfile, 'cHRM',
                    struct.pack("!8L",
                                *[int(round(it * 1e5)) for it in data]))
    # http://www.w3.org/TR/PNG/#11gAMA
    if self.gamma is not None:
        write_chunk(outfile, 'gAMA',
                    struct.pack("!L", int(round(self.gamma * 1e5))))
    # http://www.w3.org/TR/PNG/#11iCCP
    if self.icc_profile is not None:
        if self.compression is None or self.compression == -1:
            comp_level = 6  # zlib.Z_DEFAULT_COMPRESSION
        else:
            comp_level = self.compression
        # Layout: name, null separator, compression method (0),
        # compressed profile bytes.
        write_chunk(outfile, 'iCCP',
                    self.icc_profile[0] + zerobyte +
                    zerobyte +
                    zlib.compress(self.icc_profile[1], comp_level))
def __write_text(self, outfile):
    """
    Write textual metadata (``tEXt``/``iTXt`` chunks) to `outfile`.

    Values that encode as latin-1 are written as ``tEXt``; anything
    else falls back to UTF-8 in an ``iTXt`` chunk.

    This method should be called only from ``write_idat`` method
    or chunk order will be ruined.
    """
    for k, v in self.text.items():
        if not isinstance(v, bytes):
            try:
                international = False
                v = v.encode('latin-1')
            except UnicodeEncodeError:
                # latin-1 cannot represent this value; use iTXt/UTF-8.
                international = True
                v = v.encode('utf-8')
        else:
            # Raw bytes are written as-is in a tEXt chunk.
            international = False
        if not isinstance(k, bytes):
            k = strtobytes(k)
        if international:
            # No compress, language tag or translated keyword for now
            write_chunk(outfile, 'iTXt', k + zerobyte +
                        zerobyte + zerobyte +
                        zerobyte + zerobyte + v)
        else:
            write_chunk(outfile, 'tEXt', k + zerobyte + v)
def write(self, outfile, rows):
    """
    Write a PNG image to the output file.

    `rows` should be an iterable yielding rows in boxed row flat
    pixel format, in the normal image order: ``self.height`` rows of
    ``self.width * self.planes`` values each.  If `interlace` was
    specified when creating the instance, an interlaced PNG file is
    written; the interlacing is carried out internally.

    .. note ::

      Interlacing will require the entire image to be in working
      memory.
    """
    if self.interlace:
        # Interlacing reorders rows, so buffer the whole image.
        typecode = 'H' if self.bitdepth > 8 else 'B'
        flat = array(typecode, itertools.chain(*rows))
        return self.write_array(outfile, flat)
    nrows = self.write_passes(outfile, rows)
    if nrows != self.height:
        raise ValueError(
            "rows supplied (%d) does not match height (%d)" %
            (nrows, self.height))
def write_passes(self, outfile, rows, packed=False):
    """
    Write a PNG image to the output file.

    Most users are expected to find the :meth:`write` or
    :meth:`write_array` method more convenient.

    The rows should be given to this method in the order that
    they appear in the output file.  For straightlaced images,
    this is the usual top to bottom ordering, but for interlaced
    images the rows should have already been interlaced before
    passing them to this function.

    `rows` should be an iterable that yields each row.  When
    `packed` is ``False`` the rows should be in boxed row flat pixel
    format; when `packed` is ``True`` each row should be a packed
    sequence of bytes.

    Returns the number of rows written (``self.irows``).
    """
    # Try to optimize
    if not packed:
        if self.greyscale == 'try':
            # Undecided colour mode: peek at the data to see whether
            # it can be written as greyscale.  tee() keeps a second
            # copy of the row iterator in case it cannot.
            rows1, rows2 = tee(rows)
            greyrows = try_greyscale(rows1, self.alpha)
            if greyrows is not False:
                rows = greyrows
                self.greyscale = True
                self.planes -= 2  # R, G, B collapse into one plane
            else:
                self.greyscale = False
                rows = rows2
    if not self.palette:
        # No palette, check for rescale
        targetbitdepth = None
        srcbitdepth = self.bitdepth
        if self.alpha or not self.greyscale:
            # Colour and/or alpha images support only 8 or 16 bits.
            if self.bitdepth not in (8, 16):
                targetbitdepth = (8, 16)[self.bitdepth > 8]
        else:
            assert self.greyscale
            assert not self.alpha
            if self.bitdepth not in (1, 2, 4, 8, 16):
                # Round up to the nearest PNG-supported depth.
                if self.bitdepth > 8:
                    targetbitdepth = 16
                elif self.bitdepth == 3:
                    targetbitdepth = 4
                else:
                    assert self.bitdepth in (5, 6, 7)
                    targetbitdepth = 8
        if targetbitdepth:
            if packed:
                raise Error("writing packed pixels not suitable for"
                            " bit depth %d" % self.bitdepth)
            self.bitdepth = targetbitdepth
            # Map the source sample range onto the target range.
            factor = \
                float(2**targetbitdepth - 1) / float(2**srcbitdepth - 1)

            def scalerow(inrows):
                """Rescale all pixels"""
                for row in inrows:
                    yield [int(round(factor * x)) for x in row]
            rows = scalerow(rows)
    self.write_idat(outfile, self.comp_idat(self.idat(rows, packed)))
    return self.irows
def write_idat(self, outfile, idat_sequence):
    """
    Write a complete PNG stream to `outfile`.

    Emits the signature, ``IHDR``, the configured ancillary chunks
    (in an order satisfying the PNG chunk-ordering rules), the
    ``IDAT`` chunks from `idat_sequence`, and finally ``IEND``.

    `idat_sequence` should be an iterable that produces IDAT chunk
    payloads compatible with this `Writer` configuration.
    """
    # http://www.w3.org/TR/PNG/#5PNG-file-signature
    outfile.write(png_signature)
    # Colour type bit flags: 4 = alpha, 2 = colour (not greyscale),
    # 1 = palette.
    color_type = 4 * self.alpha + 2 * (not self.greyscale) +\
        bool(self.palette)
    # http://www.w3.org/TR/PNG/#11IHDR
    write_chunk(outfile, 'IHDR',
                struct.pack("!2I5B", self.width, self.height,
                            self.bitdepth, color_type,
                            0, 0, self.interlace))
    # See :chunk:order
    self.__write_srgb(outfile)
    # See :chunk:order
    # http://www.w3.org/TR/PNG/#11sBIT
    if not self.palette and self.pixbitdepth != self.bitdepth:
        # Write sBIT of palette within __write_palette
        # TODO: support different bitdepth per plane
        write_chunk(outfile, 'sBIT',
                    struct.pack('%dB' % self.planes,
                                *[self.pixbitdepth] * self.planes))
    # :chunk:order: Without a palette (PLTE chunk), ordering is
    # relatively relaxed. With one, gamma info must precede PLTE
    # chunk which must precede tRNS and bKGD.
    # See http://www.w3.org/TR/PNG/#5ChunkOrdering
    if self.palette:
        self.__write_palette(outfile)
    # http://www.w3.org/TR/PNG/#11tRNS
    if self.transparent is not None:
        if self.greyscale:
            write_chunk(outfile, 'tRNS',
                        struct.pack("!1H", *self.transparent))
        else:
            write_chunk(outfile, 'tRNS',
                        struct.pack("!3H", *self.transparent))
    # http://www.w3.org/TR/PNG/#11bKGD
    if self.background is not None:
        if self.greyscale:
            write_chunk(outfile, 'bKGD',
                        struct.pack("!1H", *self.background))
        else:
            write_chunk(outfile, 'bKGD',
                        struct.pack("!3H", *self.background))
    # http://www.w3.org/TR/PNG/#11pHYs
    if self.resolution is not None:
        write_chunk(outfile, 'pHYs',
                    struct.pack("!IIB",
                                self.resolution[0][0],
                                self.resolution[0][1],
                                self.resolution[1]))
    # http://www.w3.org/TR/PNG/#11tIME
    if self.modification_time is not None:
        if self.modification_time is True:
            # Deferred timestamp: stamp with the time of writing.
            self.modification_time = check_time('now')
        write_chunk(outfile, 'tIME',
                    struct.pack("!H5B", *(self.modification_time[:6])))
    # http://www.w3.org/TR/PNG/#11textinfo
    if self.text:
        self.__write_text(outfile)
    for idat in idat_sequence:
        write_chunk(outfile, 'IDAT', idat)
    # http://www.w3.org/TR/PNG/#11IEND
    write_chunk(outfile, 'IEND')
def comp_idat(self, idat):
    """Yield compressed ``IDAT`` chunk payloads built from `idat` data."""
    # http://www.w3.org/TR/PNG/#11IDAT
    if self.compression is None:
        compressor = zlib.compressobj()
    else:
        compressor = zlib.compressobj(self.compression)
    for piece in idat:
        compressed = compressor.compress(piece)
        if compressed:
            yield compressed
    tail = compressor.flush()
    if tail:
        yield tail
def idat(self, rows, packed=False):
    """Generator that produces uncompressed IDAT data from rows.

    Each row is filtered (see :class:`Filter`) and accumulated into
    a byte buffer, which is yielded in pieces of roughly
    ``self.chunk_limit`` bytes.  Sets ``self.irows`` to the number
    of rows consumed.
    """
    # http://www.w3.org/TR/PNG/#11IDAT
    filt = Filter(self.bitdepth * self.planes,
                  self.interlace, self.height)
    data = bytearray()

    def byteextend(rowbytes):
        """Default extending data with bytes. Applying filter"""
        data.extend(filt.do_filter(self.filter_type, rowbytes))
    # Choose an extend function based on the bitdepth. The extend
    # function packs/decomposes the pixel values into bytes and
    # stuffs them onto the data array.
    if self.bitdepth == 8 or packed:
        extend = byteextend
    elif self.bitdepth == 16:
        def extend(sl):
            """Decompose 16-bit samples into big-endian bytes first."""
            fmt = '!%dH' % len(sl)
            byteextend(bytearray(struct.pack(fmt, *sl)))
    else:
        # Pack sub-byte samples into whole bytes.
        assert self.bitdepth < 8
        # samples per byte
        spb = 8 // self.bitdepth

        def extend(sl):
            """Pack into bytes before byteextend"""
            a = bytearray(sl)
            # Adding padding bytes so we can group into a whole
            # number of spb-tuples.
            l = float(len(a))
            extra = math.ceil(l / float(spb)) * spb - l
            a.extend([0] * int(extra))
            # Pack into bytes
            l = group(a, spb)
            l = [reduce(lambda x, y: (x << self.bitdepth) + y, e)
                 for e in l]
            byteextend(l)
    # Build the first row, testing mostly to see if we need to
    # change the extend function to cope with NumPy integer types
    # (they cause our ordinary definition of extend to fail, so we
    # wrap it). See
    # http://code.google.com/p/pypng/issues/detail?id=44
    enumrows = enumerate(rows)
    del rows
    # :todo: Certain exceptions in the call to ``.next()`` or the
    # following try would indicate no row data supplied.
    # Should catch.
    i, row = next(enumrows)
    try:
        # If this fails...
        extend(row)
    except Exception:
        # Was a bare ``except:``, which would also swallow
        # KeyboardInterrupt and SystemExit; Exception is broad
        # enough for the NumPy-type failures this guards against.
        # ... try a version that converts the values to int first.
        # Not only does this work for the (slightly broken) NumPy
        # types, there are probably lots of other, unknown, "nearly"
        # int types it works for.
        def wrapmapint(f):
            return lambda sl: f([int(x) for x in sl])
        extend = wrapmapint(extend)
        del wrapmapint
        extend(row)
    for i, row in enumrows:
        extend(row)
        if len(data) > self.chunk_limit:
            yield bytearray_to_bytes(data)
            # Because of our very witty definition of ``extend``,
            # above, we must re-use the same ``data`` object. Hence
            # we use ``del`` to empty this one, rather than create a
            # fresh one (which would be my natural FP instinct).
            del data[:]
    if len(data):
        yield bytearray_to_bytes(data)
    self.irows = i + 1
def write_array(self, outfile, pixels):
    """
    Encode `pixels` (flat row flat pixel format) as a PNG written to
    `outfile`.  See also the :meth:`write` method.
    """
    if self.interlace:
        source = self.array_scanlines_interlace(pixels)
    else:
        source = self.array_scanlines(pixels)
    self.write_passes(outfile, source)
def write_packed(self, outfile, rows):
    """
    Write a PNG file whose pixel data is already packed.

    `rows` must be in boxed row packed format: each row a packed
    sequence of bytes.

    Interlaced output technically works but is best avoided; for
    interlaced images rows must be supplied in file order.

    Use only when the source bit depth is one natively supported by
    PNG: 1, 2, 4, 8 or 16.
    """
    return self.write_passes(outfile, rows, packed=True)
def array_scanlines(self, pixels):
    """Yield boxed rows (flat pixels) sliced from a flat array."""
    values_per_row = self.width * self.planes
    for row_index in range(self.height):
        offset = row_index * values_per_row
        yield pixels[offset:offset + values_per_row]
def array_scanlines_interlace(self, pixels):
    """
    Generator for interlaced scanlines from an array.

    `pixels` is the full source image in flat row flat pixel format.
    The generator yields each scanline of the reduced passes in turn,
    in boxed row flat pixel format.
    """
    # http://www.w3.org/TR/PNG/#8InterlaceMethods
    # Array type.
    fmt = 'BH'[self.bitdepth > 8]
    # Value per row
    vpr = self.width * self.planes
    for xstart, ystart, xstep, ystep in _adam7:
        if xstart >= self.width:
            # This pass selects no pixels at this image width.
            continue
        # Pixels per row (of reduced image)
        ppr = int(math.ceil((self.width-xstart)/float(xstep)))
        # number of values in reduced image row.
        row_len = ppr*self.planes
        for y in range(ystart, self.height, ystep):
            end_offset = (y + 1) * vpr
            if xstep == 1:
                # Last pass (0, 1, 1, 2))
                offset = y * vpr
                yield pixels[offset:end_offset]
            else:
                row = array(fmt)
                # There's no easier way to set the length of an array
                row.extend(pixels[0:row_len])
                offset = y * vpr + xstart * self.planes
                # Copy each plane via an extended slice stepping
                # through the selected pixels of this pass.
                for i in range(self.planes):
                    row[i::self.planes] = \
                        pixels[offset + i:end_offset:self.planes * xstep]
                yield row
def write_chunk(outfile, tag, data=bytes()):
    """Write a PNG chunk to the output file, including length and checksum."""
    # http://www.w3.org/TR/PNG/#5Chunk-layout
    tag = strtobytes(tag)
    outfile.write(struct.pack("!I", len(data)))
    outfile.write(tag)
    outfile.write(data)
    # CRC covers the tag and the data, masked to 32 bits.
    crc = zlib.crc32(data, zlib.crc32(tag)) & 0xFFFFFFFF
    outfile.write(struct.pack("!I", crc))
def write_chunks(out, chunks):
    """Create a PNG file by writing the signature followed by `chunks`."""
    out.write(png_signature)
    for chunk in chunks:
        write_chunk(out, *chunk)
class MergedPlanes(object):
    """Merge two flatboxed row iterators into a single iterator.

    Each produced row interleaves, pixel by pixel, the
    `nplanes_left` values from a row of `seq_left` with the
    `nplanes_right` values from a row of `seq_right`.
    """

    def __init__(self, seq_left, nplanes_left, seq_right, nplanes_right,
                 bitdepth=None, width=None):
        """
        Initialise merge iterator with sources.

        `seq_right` may be an integer instead of an iterator, in which
        case the right-hand planes are filled with that value.

        `bitdepth` and `width` may be omitted; they are then derived
        from the first left-hand row.
        """
        self.seq_left = seq_left
        self.nplanes_left = nplanes_left
        if isinstance(seq_right, int):
            self.seq_right = self.rigthgen(seq_right)
        else:
            self.seq_right = seq_right
        self.nplanes_right = nplanes_right
        self.nplanes_res = nplanes_left + nplanes_right
        self.bitdepth = bitdepth
        self.width = width

    def newarray(self, length, value=0):
        """Create a row container of `length` samples filled with `value`."""
        if self.bitdepth > 8:
            return array('H', [value] * length)
        else:
            return bytearray([value] * length)

    def rigthgen(self, value=0):
        """Generate constant fill rows for the right-hand planes."""
        # Lazy generator: self.width may only become known after the
        # first left-hand row has been read in next().
        while True:
            yield self.newarray(self.nplanes_right * self.width, value)

    def next(self):
        """Generate merged row, consuming rows of original iterators."""
        left = next(self.seq_left)
        if self.width is None:
            # Floor division: true division would give a float under
            # Python 3 and break array-length arithmetic below.
            self.width = len(left) // self.nplanes_left
        if self.bitdepth is None:
            # Detect bitdepth from the row container.
            if hasattr(left, 'itemsize'):  # array
                self.bitdepth = left.itemsize * 8
            elif isinstance(left, (bytes, bytearray)):  # bytearray
                self.bitdepth = 8
            else:
                raise Error("Unknown bitdepth for merging planes")
        right = next(self.seq_right)
        rowlength = self.nplanes_res * self.width
        new = self.newarray(rowlength)
        if type(left) == type(right) == type(new):
            # Same container type: interleave with extended slices.
            for i in range(self.nplanes_left):
                new[i::self.nplanes_res] = left[i::self.nplanes_left]
            for i in range(self.nplanes_right):
                i_ = i + self.nplanes_left
                new[i_::self.nplanes_res] = right[i::self.nplanes_right]
        else:
            # Mixed container types: copy sample by sample.  Rows are
            # pixel-major (plane values adjacent within each pixel),
            # matching the slice-assignment branch above; the
            # original plane-major indexing wrote out of bounds.
            for j in range(self.width):
                for i in range(self.nplanes_left):
                    new[j * self.nplanes_res + i] = \
                        left[j * self.nplanes_left + i]
                for i in range(self.nplanes_right):
                    i_ = i + self.nplanes_left
                    new[j * self.nplanes_res + i_] = \
                        right[j * self.nplanes_right + i]
        return new

    def __next__(self):
        """Iterator protocol"""
        return self.next()

    def __iter__(self):
        """Iterator protocol"""
        return self
def interleave_planes(ipixels, apixels, ipsize, apsize):
    """
    Interleave (colour) planes, e.g. RGB + A = RGBA.

    Return an array of pixels consisting of the `ipsize` elements of
    data from each pixel in `ipixels` followed by the `apsize` elements
    of data from each pixel in `apixels`.  Conventionally `ipixels`
    and `apixels` are byte arrays so the sizes are bytes, but it
    actually works with any arrays of the same type.  The returned
    array is the same type as the input arrays which should be the
    same type as each other.

    .. deprecated:: use :class:`MergedPlanes` instead.
    """
    warnings.warn('please use `Merge Planes` class instead',
                  DeprecationWarning)
    # MergedPlanes consumes its sources with next(); bare lists are
    # not iterators, so wrap the single-row sequences with iter().
    newi = MergedPlanes(iter([ipixels]), ipsize, iter([apixels]), apsize)
    return next(newi)
class Filter(BaseFilter):
    """
    PNG scanline filtering that tracks previous-line state.

    Supports the five basic filter types plus registered adaptive
    strategies, and resets the previous-line buffer at reduced-pass
    boundaries when interlacing.
    """

    def __init__(self, bitdepth=8, interlace=None, rows=None, prev=None):
        # `bitdepth` is bits per pixel (sample depth times planes);
        # see the call site in ``Writer.idat``.  `prev` optionally
        # seeds the previous-scanline buffer.
        BaseFilter.__init__(self, bitdepth)
        if prev is None:
            self.prev = None
        else:
            self.prev = bytearray(prev)
        self.interlace = interlace
        # Remaining row counts of each reduced interlace pass; when a
        # count reaches zero the previous-line buffer is reset.
        self.restarts = []
        if self.interlace:
            for _, off, _, step in _adam7:
                self.restarts.append((rows - off - 1 + step) // step)

    def filter_all(self, line):
        """Doing all filters for specified line

        return filtered lines as list

        For using with adaptive filters
        """
        lines = [None] * 5
        for filter_type in range(5):  # range save more than 'optimised' order
            res = copyBarray(line)
            self._filter_scanline(filter_type, line, res)
            # First byte of a filtered scanline is the filter type.
            res.insert(0, filter_type)
            lines[filter_type] = res
        return lines

    # Registry of adaptive strategies: name -> callable.
    adapt_methods = {}

    def adaptive_filter(self, strategy, line):
        """
        Applying non-standart filters (e.g. adaptive selection)

        `strategy` may be one of following types:

        - string - find and use strategy with this name
        - dict - find and use strategy by field 'name' of this dict
          and use it with this dict as configuration
        - callable - use this callable as strategy with empty dict as cfg
          (check :meth:`register_extra_filter` for documentation)

        `line` specifies the current (unfiltered) scanline as a sequence
        of bytes;
        """
        if isinstance(strategy, (basestring, bytes)):
            # Bare name: promote to a configuration dict.
            strategy = {'name': str(strategy)}
        if isinstance(strategy, dict):
            cfg = strategy
            strategy = Filter.adapt_methods.get(cfg['name'])
        else:
            # Callable strategy, no configuration.
            cfg = {}
        if strategy is None:
            raise Error("Adaptive strategy not found")
        else:
            return strategy(line, cfg, self)

    def do_filter(self, filter_type, line):
        """
        Applying filter, caring about prev line, interlacing etc.

        `filter_type` may be integer to apply basic filter or
        adaptive strategy with dict
        (`name` is reqired field, others may tune strategy)
        """
        # Recall that filtering algorithms are applied to bytes,
        # not to pixels, regardless of the bit depth or colour type
        # of the image.
        line = bytearray(line)
        if isinstance(filter_type, int):
            res = bytearray(line)
            self._filter_scanline(filter_type, line, res)
            res.insert(0, filter_type)  # Add filter type as the first byte
        else:
            res = self.adaptive_filter(filter_type, line)
        # The unfiltered line becomes the predictor for the next row.
        self.prev = line
        if self.restarts:
            # Count down the current interlace pass; at a pass
            # boundary the previous-line buffer must be cleared.
            self.restarts[0] -= 1
            if self.restarts[0] == 0:
                del self.restarts[0]
                self.prev = None
        return res
def register_extra_filter(selector, name):
    """
    Register an adaptive filter-selection strategy for further use.

    `selector` is a callable ``selector(line, cfg, filter_obj)`` where

    - `line` is the scanline to be filtered,
    - `cfg` is a dict of optional tuning parameters,
    - `filter_obj` is the :class:`Filter` instance, providing context
      and access to the basic filters,

    and which returns the chosen (filtered) line.

    `name` is the key under which the strategy can be recalled later.
    """
    Filter.adapt_methods[str(name)] = selector
# Two basic adaptive strategies
def adapt_sum(line, cfg, filter_obj):
    """Choose the filter whose output has the smallest byte sum."""
    candidates = filter_obj.filter_all(line)
    totals = [sum(candidate) for candidate in candidates]
    return candidates[totals.index(min(totals))]
register_extra_filter(adapt_sum, 'sum')
def adapt_entropy(line, cfg, filter_obj):
    """Choose the filter whose output has the fewest distinct values."""
    candidates = filter_obj.filter_all(line)
    uniques = [len(set(candidate)) for candidate in candidates]
    return candidates[uniques.index(min(uniques))]
register_extra_filter(adapt_entropy, 'entropy')
def parse_mode(mode, default_bitdepth=None):
    """Parse a PIL-style mode string into ``(grayscale, alpha, bitdepth)``."""
    # Special-case modes first.
    if mode == 'P':
        # We have no palette information at this point.
        raise Error('Unknown colour mode:' + mode)
    if mode == '1':
        # Bilevel.
        return (True, False, 1)
    if mode == 'I':
        # Integer pixels.
        return (True, False, 16)
    # General form: colour prefix, optional 'A', optional ';<depth>'.
    remainder = mode
    if remainder.startswith('L'):
        grayscale = True
        remainder = remainder[1:]
    elif remainder.startswith('RGB'):
        grayscale = False
        remainder = remainder[3:]
    else:
        raise Error('Unknown colour mode:' + mode)
    alpha = remainder.startswith('A')
    if alpha:
        remainder = remainder[1:]
    bitdepth = default_bitdepth
    if remainder.startswith(';'):
        remainder = remainder[1:]
    if remainder:
        try:
            bitdepth = int(remainder)
        except (TypeError, ValueError):
            raise Error('Unsupported bitdepth mode:' + remainder)
    return (grayscale, alpha, bitdepth)
def from_array(a, mode=None, info=None):
    """
    Create a PNG :class:`Image` object from a 2- or 3-dimensional array.

    One application of this function is easy PIL-style saving:
    ``png.from_array(pixels, 'L').save('foo.png')``.

    .. note :

      The use of the term *3-dimensional* is for marketing purposes
      only. It doesn't actually work. Please bear with us. Meanwhile
      enjoy the complimentary snacks (on request) and please use a
      2-dimensional array.

    Unless they are specified using the *info* parameter, the PNG's
    height and width are taken from the array size. For a 3 dimensional
    array the first axis is the height; the second axis is the width;
    and the third axis is the channel number. Thus an RGB image that is
    16 pixels high and 8 wide will use an array that is 16x8x3. For 2
    dimensional arrays the first axis is the height, but the second axis
    is ``width*channels``, so an RGB image that is 16 pixels high and 8
    wide will use a 2-dimensional array that is 16x24 (each row will be
    8*3 = 24 sample values).

    *mode* is a string that specifies the image colour format in a
    PIL-style mode. It can be:

    ``'L'``
      greyscale (1 channel)
    ``'LA'``
      greyscale with alpha (2 channel)
    ``'RGB'``
      colour image (3 channel)
    ``'RGBA'``
      colour image with alpha (4 channel)

    The mode string can also specify the bit depth (overriding how this
    function normally derives the bit depth, see below). Appending
    ``';16'`` to the mode will cause the PNG to be 16 bits per channel;
    any decimal from 1 to 16 can be used to specify the bit depth.

    When a 2-dimensional array is used *mode* determines how many
    channels the image has, and so allows the width to be derived from
    the second array dimension.

    The array is expected to be a ``numpy`` array, but it can be any
    suitable Python sequence. For example, a list of lists can be used:
    ``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``. The exact
    rules are: ``len(a)`` gives the first dimension, height;
    ``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
    third dimension, unless an exception is raised in which case a
    2-dimensional array is assumed. It's slightly more complicated than
    that because an iterator of rows can be used, and it all still
    works. Using an iterator allows data to be streamed efficiently.

    The bit depth of the PNG is normally taken from the array element's
    datatype (but if *mode* specifies a bitdepth then that is used
    instead). The array element's datatype is determined in a way which
    is supposed to work both for ``numpy`` arrays and for Python
    ``array.array`` objects. A 1 byte datatype will give a bit depth of
    8, a 2 byte datatype will give a bit depth of 16. If the datatype
    does not have an implicit size, for example it is a plain Python
    list of lists, as above, then a default of 8 is used.

    The *info* parameter is a dictionary that can be used to specify
    metadata (in the same style as the arguments to the
    :class:`png.Writer` class). For this function the keys that are
    useful are:

    height
      overrides the height derived from the array dimensions and allows
      *a* to be an iterable.
    width
      overrides the width derived from the array dimensions.
    bitdepth
      overrides the bit depth derived from the element datatype (but
      must match *mode* if that also specifies a bit depth).

    Generally anything specified in the
    *info* dictionary will override any implicit choices that this
    function would otherwise make, but must match any explicit ones.
    For example, if the *info* dictionary has a ``greyscale`` key then
    this must be true when mode is ``'L'`` or ``'LA'`` and false when
    mode is ``'RGB'`` or ``'RGBA'``.
    """
    # typechecks *info* to some extent.
    if info is None:
        info = {}
    else:
        # Copy so the caller's dict is never mutated.
        info = dict(info)
    # Syntax check mode string.
    grayscale, alpha, bitdepth = parse_mode(mode)
    # Colour format.
    if 'greyscale' in info:
        if bool(info['greyscale']) != grayscale:
            raise Error("info['greyscale'] should match mode.")
    info['greyscale'] = grayscale
    if 'alpha' in info:
        if bool(info['alpha']) != alpha:
            raise Error("info['alpha'] should match mode.")
    info['alpha'] = alpha
    # Get bitdepth from *mode* if possible.
    if bitdepth:
        if info.get('bitdepth') and bitdepth != info['bitdepth']:
            raise Error("mode bitdepth (%d) should match info bitdepth (%d)." %
                        (bitdepth, info['bitdepth']))
        info['bitdepth'] = bitdepth
    # Channel count implied by the mode.
    planes = (3, 1)[grayscale] + alpha
    if 'planes' in info:
        if info['planes'] != planes:
            raise Error("info['planes'] should match mode.")
    # Dimensions.
    if 'size' in info:
        info['width'], info['height'] = check_sizes(info.get('size'),
                                                    info.get('width'),
                                                    info.get('height'))
    if 'height' not in info:
        try:
            l = len(a)
        except TypeError:
            raise Error(
                "len(a) does not work, supply info['height'] instead.")
        info['height'] = l
    # In order to work out whether we the array is 2D or 3D we need its
    # first row, which requires that we take a copy of its iterator.
    # We may also need the first row to derive width and bitdepth.
    row, a = peekiter(a)
    try:
        row[0][0]
        threed = True
        testelement = row[0]
    except (IndexError, TypeError):
        threed = False
        testelement = row
    if 'width' not in info:
        if threed:
            width = len(row)
        else:
            width = len(row) // planes
        info['width'] = width
    # Not implemented yet
    assert not threed
    if 'bitdepth' not in info:
        try:
            dtype = testelement.dtype
            # goto the "else:" clause. Sorry.
        except AttributeError:
            try:
                # Try a Python array.array.
                bitdepth = 8 * testelement.itemsize
            except AttributeError:
                # We can't determine it from the array element's
                # datatype, use a default of 8.
                bitdepth = 8
        else:
            # If we got here without exception, we now assume that
            # the array is a numpy array.
            if dtype.kind == 'b':
                bitdepth = 1
            else:
                bitdepth = 8 * dtype.itemsize
        info['bitdepth'] = bitdepth
    # Sanity check: everything the Writer needs must now be present.
    for thing in ('width', 'height', 'bitdepth', 'greyscale', 'alpha'):
        assert thing in info
    return Image(a, info)
# So that refugees from PIL feel more at home. Not documented.
fromarray = from_array
class Image(object):
    """
    A PNG image.

    You can create an :class:`Image` object from an array of pixels
    by calling :meth:`png.from_array`.  It can be saved to disk with
    the :meth:`save` method.
    """

    def __init__(self, rows, info):
        """The constructor is not public. Please do not call it."""
        self.rows = rows
        self.info = info

    def save(self, file):
        """
        Save the image to *file*.

        If *file* looks like an open file descriptor then it is
        used, otherwise it is treated as a filename and a fresh file
        is opened.

        In general, you can only call this method once; after it has
        been called the first time and the PNG image has been saved,
        the source data will have been streamed, and cannot be
        streamed again.
        """
        writer = Writer(**self.info)
        # Only close the file if this method opened it.
        opened_here = not hasattr(file, 'write')
        if opened_here:
            file = open(file, 'wb')
        try:
            writer.write(file, self.rows)
        finally:
            if opened_here:
                file.close()
class _readable(object):
"""A simple file-like interface for strings and arrays."""
def __init__(self, buf):
self.buf = buf
self.offset = 0
def read(self, n):
"""Read `n` chars from buffer"""
r = self.buf[self.offset:self.offset + n]
if isinstance(r, array):
r = r.tostring()
self.offset += n
return r
class Reader(object):
"""PNG decoder in pure Python."""
    def __init__(self, _guess=None, **kw):
        """
        Create a PNG decoder object.
        The constructor expects exactly one keyword argument. If you
        supply a positional argument instead, it will guess the input
        type. You can choose among the following keyword arguments:
        filename
            Name of input file (a PNG file).
        file
            A file-like object (object with a read() method).
        bytes
            ``array`` or ``string`` with PNG data.
        """
        if ((_guess is not None and len(kw) != 0) or
                (_guess is None and len(kw) != 1)):
            raise TypeError("Reader() takes exactly 1 argument")
        # Will be the first 8 bytes, later on. See validate_signature.
        self.signature = None
        # Transparent colour from a tRNS chunk, if any (see
        # _process_tRNS).
        self.transparent = None
        # keyword -> text mapping accumulated from tEXt/zTXt/iTXt chunks.
        self.text = {}
        # A pair of (len, chunk_type) if a chunk has been read but its data and
        # checksum have not (in other words the file position is just
        # past the 4 bytes that specify the chunk type). See preamble
        # method for how this is used.
        self.atchunk = None
        # Guess the input type from the positional argument, mapping it
        # onto the equivalent keyword argument.
        if _guess is not None:
            if isinstance(_guess, array):
                kw["bytes"] = _guess
            elif isinstance(_guess, str):
                kw["filename"] = _guess
            elif hasattr(_guess, 'read'):
                kw["file"] = _guess
        # True only when this Reader opened the file itself and is
        # therefore responsible for closing it (see __del__).
        self.close_file = False
        if "filename" in kw:
            self.file = open(kw["filename"], "rb")
            self.close_file = True
        elif "file" in kw:
            self.file = kw["file"]
        elif "bytes" in kw:
            self.file = _readable(kw["bytes"])
        else:
            raise TypeError("expecting filename, file or bytes array")
def __del__(self):
if self.close_file:
self.file.close()
def chunk(self, seek=None, lenient=False):
"""
Read the next PNG chunk from the input file
returns a (*chunk_type*, *data*) tuple. *chunk_type* is the chunk's
type as a byte string (all PNG chunk types are 4 bytes long).
*data* is the chunk's data content, as a byte string.
If the optional `seek` argument is
specified then it will keep reading chunks until it either runs
out of file or finds the chunk_type specified by the argument. Note
that in general the order of chunks in PNGs is unspecified, so
using `seek` can cause you to miss chunks.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions.
"""
self.validate_signature()
while True:
# http://www.w3.org/TR/PNG/#5Chunk-layout
if not self.atchunk:
self.atchunk = self.chunklentype()
length, chunk_type = self.atchunk
self.atchunk = None
data = self.file.read(length)
if len(data) != length:
raise ChunkError('Chunk %s too short for required %i octets.'
% (chunk_type, length))
checksum = self.file.read(4)
if len(checksum) != 4:
raise ChunkError('Chunk %s too short for checksum.',
chunk_type)
if seek and chunk_type != seek:
continue
verify = zlib.crc32(strtobytes(chunk_type))
verify = zlib.crc32(data, verify)
# Whether the output from zlib.crc32 is signed or not varies
# according to hideous implementation details, see
# http://bugs.python.org/issue1202 .
# We coerce it to be positive here (in a way which works on
# Python 2.3 and older).
verify &= 2**32 - 1
verify = struct.pack('!I', verify)
if checksum != verify:
(a, ) = struct.unpack('!I', checksum)
(b, ) = struct.unpack('!I', verify)
message = "Checksum error in %s chunk: 0x%08X != 0x%08X." %\
(chunk_type, a, b)
if lenient:
warnings.warn(message, RuntimeWarning)
else:
raise ChunkError(message)
return chunk_type, data
def chunks(self):
"""Return an iterator that will yield each chunk as a
(*chunktype*, *content*) pair.
"""
while True:
t,v = self.chunk()
yield t,v
if t == 'IEND':
break
    def deinterlace(self, raw):
        """
        Read raw pixel data, undo filters, deinterlace, and flatten.
        Return in flat row flat pixel format.
        Processes the seven Adam7 passes in order, scattering each
        reduced pass image into its final position in the output array.
        """
        # Values per row (of the target image)
        vpr = self.width * self.planes
        # Make a result array, and make it big enough. Interleaving
        # writes to the output array randomly (well, not quite), so the
        # entire output array must be in memory.
        if self.bitdepth > 8:
            a = newHarray(vpr * self.height)
        else:
            a = newBarray(vpr * self.height)
        source_offset = 0
        filt = Filter(self.bitdepth * self.planes)
        for xstart, ystart, xstep, ystep in _adam7:
            # A pass contributes no pixels when the image is narrower
            # than the pass's first column.
            if xstart >= self.width:
                continue
            # The previous (reconstructed) scanline. None at the
            # beginning of a pass to indicate that there is no previous
            # line.
            filt.prev = None
            # Pixels per row (reduced pass image)
            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
            # Row size in bytes for this pass.
            row_size = int(math.ceil(self.psize * ppr))
            for y in range(ystart, self.height, ystep):
                # Each serialised row is one filter-type byte followed
                # by the filtered scanline bytes.
                filter_type = raw[source_offset]
                scanline = raw[source_offset + 1:source_offset + row_size + 1]
                source_offset += (row_size + 1)
                if filter_type not in (0, 1, 2, 3, 4):
                    raise FormatError('Invalid PNG Filter Type.'
                        ' See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')
                filt.undo_filter(filter_type, scanline)
                # Convert so that there is one element per pixel value
                flat = self.serialtoflat(scanline, ppr)
                end_offset = (y + 1) * vpr
                if xstep == 1:
                    # Last pass (0, 1, 1, 2))
                    assert xstart == 0
                    offset = y * vpr
                    a[offset:end_offset] = flat
                else:
                    # Scatter each plane of the reduced row into the
                    # output with an extended (strided) slice.
                    offset = y * vpr + xstart * self.planes
                    for i in range(self.planes):
                        a[offset + i:end_offset:self.planes * xstep] = \
                            flat[i::self.planes]
        return a
    def iterboxed(self, rows):
        """
        Iterator that yields each scanline in boxed row flat pixel format.
        `rows` should be an iterator that yields the bytes of
        each row in turn.
        Note: on Python 3 the returned ``map`` object is lazy, so rows
        are only converted as they are consumed.
        """
        def asvalues(raw):
            """
            Convert a row of raw bytes into a flat row.
            Result may or may not share with argument
            """
            if self.bitdepth == 8:
                # One byte per sample already; nothing to do.
                return raw
            if self.bitdepth == 16:
                raw = bytearray_to_bytes(raw)
                return array('H', struct.unpack('!%dH' % (len(raw) // 2), raw))
            assert self.bitdepth < 8
            width = self.width
            # Samples per byte
            spb = 8 // self.bitdepth
            out = newBarray()
            mask = 2 ** self.bitdepth - 1
            # reversed range(spb)
            shifts = [self.bitdepth * it for it in range(spb - 1, -1, -1)]
            for o in raw:
                out.extend([mask & (o >> i) for i in shifts])
            # Truncate any padding samples from the final partial byte.
            return out[:width]
        return map(asvalues, rows)
    def serialtoflat(self, raw, width=None):
        """Convert serial (byte stream) pixel data to flat row flat pixel.
        `width` is the row width in pixels; it defaults to the image
        width (the deinterlacer passes a reduced-pass width instead).
        """
        if self.bitdepth == 8:
            return raw
        if self.bitdepth == 16:
            raw = bytearray_to_bytes(raw)
            return array('H',
                struct.unpack('!%dH' % (len(raw) // 2), raw))
        assert self.bitdepth < 8
        if width is None:
            width = self.width
        # Samples per byte
        spb = 8 // self.bitdepth
        out = newBarray()
        mask = 2**self.bitdepth - 1
        # reversed range(spb)
        shifts = [self.bitdepth * it for it in range(spb - 1, -1, -1)]
        # `l` counts the samples still wanted in the current row, so
        # that padding bits at the end of each row are dropped.
        l = width
        for o in raw:
            out.extend([(mask&(o>>s)) for s in shifts][:l])
            l -= spb
            if l <= 0:
                l = width
        return out
    def iterstraight(self, raw):
        """
        Iterator that undoes the effect of filtering
        Yields each row in serialised format (as a sequence of bytes).
        Assumes input is straightlaced. `raw` should be an iterable
        that yields the raw bytes in chunks of arbitrary size.
        Buffers the incoming chunks and emits complete rows as soon as
        enough bytes have accumulated.
        """
        # length of row, in bytes (with filter)
        rb_1 = self.row_bytes + 1
        a = bytearray()
        filt = Filter(self.bitdepth * self.planes)
        for some in raw:
            a.extend(some)
            offset = 0
            # Emit every complete row currently held in the buffer.
            while len(a) >= rb_1 + offset:
                filter_type = a[offset]
                if filter_type not in (0, 1, 2, 3, 4):
                    raise FormatError('Invalid PNG Filter Type.'
                        ' See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')
                scanline = a[offset + 1:offset + rb_1]
                filt.undo_filter(filter_type, scanline)
                yield scanline
                offset += rb_1
            del a[:offset]
        if len(a) != 0:
            # :file:format We get here with a file format error:
            # when the available bytes (after decompressing) do not
            # pack into exact rows.
            raise FormatError(
                'Wrong size for decompressed IDAT chunk.')
        assert len(a) == 0
    def validate_signature(self):
        """If signature (header) has not been read then read and validate it"""
        if self.signature:
            # Already read and validated on an earlier call; this makes
            # the method safe to call repeatedly.
            return
        self.signature = self.file.read(8)
        if self.signature != png_signature:
            raise FormatError("PNG file has invalid signature.")
    def preamble(self, lenient=False):
        """
        Extract the image metadata
        Extract the image metadata by reading the initial part of
        the PNG file up to the start of the ``IDAT`` chunk. All the
        chunks that precede the ``IDAT`` chunk are read and either
        processed for metadata or discarded.
        If the optional `lenient` argument evaluates to `True`, checksum
        failures will raise warnings rather than exceptions.
        """
        self.validate_signature()
        while True:
            if not self.atchunk:
                self.atchunk = self.chunklentype()
                # chunklentype() returns None at end of file; a PNG
                # with no IDAT chunk at all is malformed.
                if self.atchunk is None:
                    raise FormatError(
                        'This PNG file has no IDAT chunks.')
            # Stop just before IDAT; self.atchunk remembers the pending
            # (length, type) so chunk() can resume from here.
            if self.atchunk[1] == 'IDAT':
                return
            self.process_chunk(lenient=lenient)
    def chunklentype(self):
        """Reads just enough of the input to determine the next
        chunk's length and type, returned as a (*length*, *chunk_type*) pair
        where *chunk_type* is a string. If there are no more chunks, ``None``
        is returned.
        """
        x = self.file.read(8)
        if not x:
            # Clean end of file: no more chunks.
            return None
        if len(x) != 8:
            raise FormatError(
                'End of file whilst reading chunk length and type.')
        length, chunk_type = struct.unpack('!I4s', x)
        chunk_type = bytestostr(chunk_type)
        # The PNG spec caps chunk lengths at 2**31-1 octets.
        if length > 2**31-1:
            raise FormatError('Chunk %s is too large: %d.' % (chunk_type,
                length))
        return length, chunk_type
    def process_chunk(self, lenient=False):
        """
        Process the next chunk and its data.
        If the optional `lenient` argument evaluates to `True`,
        checksum failures will raise warnings rather than exceptions.
        Dispatches to a ``_process_XXXX`` method named after the chunk
        type; chunk types without a handler are silently discarded.
        """
        chunk_type, data = self.chunk(lenient=lenient)
        method = '_process_' + chunk_type
        m = getattr(self, method, None)
        if m:
            m(data)
    def _process_IHDR(self, data):
        """Process the ``IHDR`` (image header) chunk and compute the
        derived geometry attributes used by the rest of the decoder."""
        # http://www.w3.org/TR/PNG/#11IHDR
        if len(data) != 13:
            raise FormatError('IHDR chunk has incorrect length.')
        (self.width, self.height, self.bitdepth, self.color_type,
         self.compression, self.filter,
         self.interlace) = struct.unpack("!2I5B", data)
        check_bitdepth_colortype(self.bitdepth, self.color_type)
        if self.compression != 0:
            raise Error("unknown compression method %d" % self.compression)
        if self.filter != 0:
            raise FormatError("Unknown filter method %d,"
                " see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
                % self.filter)
        if self.interlace not in (0,1):
            raise FormatError("Unknown interlace method %d,"
                " see http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods ."
                % self.interlace)
        # Derived values
        # http://www.w3.org/TR/PNG/#6Colour-values
        colormap = bool(self.color_type & 1)
        greyscale = not (self.color_type & 2)
        alpha = bool(self.color_type & 4)
        color_planes = (3,1)[greyscale or colormap]
        planes = color_planes + alpha
        self.colormap = colormap
        self.greyscale = greyscale
        self.alpha = alpha
        self.color_planes = color_planes
        self.planes = planes
        # Bytes per pixel; may be fractional for bit depths below 8,
        # which is why it is only coerced to int when exact.
        self.psize = float(self.bitdepth)/ float(8) * planes
        if int(self.psize) == self.psize:
            self.psize = int(self.psize)
        self.row_bytes = int(math.ceil(self.width * self.psize))
        # Stores PLTE chunk if present, and is used to check
        # chunk ordering constraints.
        self.plte = None
        # Stores tRNS chunk if present, and is used to check chunk
        # ordering constraints.
        self.trns = None
        # Stores sbit chunk if present.
        self.sbit = None
        # If an sRGB chunk is present, rendering intent is updated
        self.rendering_intent = None
    def _process_PLTE(self, data):
        """Store and sanity-check the ``PLTE`` (palette) chunk."""
        # http://www.w3.org/TR/PNG/#11PLTE
        if self.plte:
            warnings.warn("Multiple PLTE chunks present.")
        self.plte = data
        # Each palette entry is an RGB triple.
        if len(data) % 3 != 0:
            raise FormatError(
                "PLTE chunk's length should be a multiple of 3.")
        # A palette may not have more entries than the bit depth allows.
        if len(data) > (2**self.bitdepth)*3:
            raise FormatError("PLTE chunk is too long.")
        if len(data) == 0:
            raise FormatError("Empty PLTE is not allowed.")
    def _process_bKGD(self, data):
        """Record the default background colour from a ``bKGD`` chunk:
        a palette index for colormapped images, otherwise one 16-bit
        value per colour plane."""
        try:
            if self.colormap:
                if not self.plte:
                    warnings.warn(
                        "PLTE chunk is required before bKGD chunk.")
                self.background = struct.unpack('B', data)
            else:
                self.background = struct.unpack("!%dH" % self.color_planes,
                                                data)
        except struct.error:
            # Wrong-size payload for either branch.
            raise FormatError("bKGD chunk has incorrect length.")
    def _process_tRNS(self, data):
        """Process a ``tRNS`` (transparency) chunk.  For colormapped
        images the raw data is kept (per-palette-entry alpha); for
        direct-colour images without alpha the single transparent
        colour is unpacked into ``self.transparent``."""
        # http://www.w3.org/TR/PNG/#11tRNS
        self.trns = data
        if self.colormap:
            if not self.plte:
                warnings.warn("PLTE chunk is required before tRNS chunk.")
            else:
                # At most one alpha byte per palette entry.
                if len(data) > len(self.plte)/3:
                    # Was warning, but promoted to Error as it
                    # would otherwise cause pain later on.
                    raise FormatError("tRNS chunk is too long.")
        else:
            # tRNS is only legal when there is no real alpha channel.
            if self.alpha:
                raise FormatError(
                    "tRNS chunk is not valid with colour type %d." %
                    self.color_type)
            try:
                self.transparent = \
                    struct.unpack("!%dH" % self.color_planes, data)
            except struct.error:
                raise FormatError("tRNS chunk has incorrect length.")
def _process_gAMA(self, data):
try:
self.gamma = struct.unpack("!L", data)[0] / 100000.0
except struct.error:
raise FormatError("gAMA chunk has incorrect length.")
def _process_iCCP(self, data):
i = data.index(zerobyte)
icc_profile_name = data[:i]
compression = data[i:i + 1]
# TODO: Raise FormatError
assert (compression == zerobyte)
icc_profile_string = zlib.decompress(data[i + 2:])
self.icc_profile = (icc_profile_name, icc_profile_string)
def _process_sBIT(self, data):
self.sbit = data
if (self.colormap and len(data) != 3 or
not self.colormap and len(data) != self.planes):
raise FormatError("sBIT chunk has incorrect length.")
def _process_sRGB(self, data):
self.rendering_intent, = struct.unpack('B', data)
def _process_cHRM(self, data):
if len(data) != struct.calcsize("!8L"):
raise FormatError("cHRM chunk has incorrect length.")
white_x, white_y, red_x, red_y, green_x, green_y, blue_x, blue_y = \
tuple([value / 100000.0 for value in struct.unpack("!8L", data)])
self.white_point = white_x, white_y
self.rgb_points = (red_x, red_y), (green_x, green_y), (blue_x, blue_y)
def _process_tEXt(self, data):
# http://www.w3.org/TR/PNG/#11tEXt
i = data.index(zerobyte)
keyword = data[:i]
try:
keyword = str(keyword, 'latin-1')
except:
pass
self.text[keyword] = data[i + 1:].decode('latin-1')
def _process_zTXt(self, data):
# http://www.w3.org/TR/PNG/#11zTXt
i = data.index(zerobyte)
keyword = data[:i]
try:
keyword = str(keyword, 'latin-1')
except:
pass
# TODO: Raise FormatError
assert data[i:i + 1] == zerobyte
text = zlib.decompress(data[i + 2:]).decode('latin-1')
self.text[keyword] = text
def _process_iTXt(self, data):
# http://www.w3.org/TR/PNG/#11iTXt
i = data.index(zerobyte)
keyword = data[:i]
try:
keyword = str(keyword, 'latin-1')
except:
pass
compress_flag = data[i + 1: i + 2]
if (compress_flag != zerobyte):
# TODO: Support for compression!!
return
# TODO: Raise FormatError
compress_method = data[i + 2: i + 3]
assert (compress_method == zerobyte)
data_ = data[i + 3:]
i = data_.index(zerobyte)
# skip language tag
data_ = data_[i + 1:]
i = data_.index(zerobyte)
# skip translated keyword
data_ = data_[i + 1:]
self.text[keyword] = data_.decode('utf-8')
def _process_pHYs(self, data):
# http://www.w3.org/TR/PNG/#11pHYs
ppux, ppuy, unit = struct.unpack('!IIB', data)
self.resolution = ((ppux, ppuy), unit)
def _process_tIME(self, data):
# http://www.w3.org/TR/PNG/#11tIME
fmt = "!H5B"
if len(data) != struct.calcsize(fmt):
raise FormatError("tIME chunk has incorrect length.")
self.last_mod_time = struct.unpack(fmt, data)
    def idat(self, lenient=False):
        """Iterator that yields all the ``IDAT`` chunks as strings.
        Non-IDAT chunks encountered along the way are discarded;
        iteration stops at ``IEND``.
        """
        while True:
            try:
                chunk_type, data = self.chunk(lenient=lenient)
            except ValueError:
                # Re-raise as a chunk error so callers see a uniform
                # exception type.
                e = sys.exc_info()[1]
                raise ChunkError(e.args[0])
            if chunk_type == 'IEND':
                # http://www.w3.org/TR/PNG/#11IEND
                break
            if chunk_type != 'IDAT':
                continue
            # chunk_type == 'IDAT'
            # http://www.w3.org/TR/PNG/#11IDAT
            if self.colormap and not self.plte:
                warnings.warn("PLTE chunk is required before IDAT chunk")
            yield data
    def idatdecomp(self, lenient=False, max_length=0):
        """Iterator that yields decompressed ``IDAT`` strings.
        (`max_length` is currently accepted but unused.)
        """
        # Currently, with no max_length parameter to decompress, this
        # routine will do one yield per IDAT chunk. So not very
        # incremental.
        d = zlib.decompressobj()
        # Each IDAT chunk is passed to the decompressor, then any
        # remaining state is decompressed out.
        for data in self.idat(lenient):
            # :todo: add a max_length argument here to limit output
            # size.
            yield bytearray(d.decompress(data))
        yield bytearray(d.flush())
    def read(self, lenient=False):
        """
        Read the PNG file and decode it.
        Returns (`width`, `height`, `pixels`, `metadata`).
        May use excessive memory.
        `pixels` are returned in boxed row flat pixel format.
        If the optional `lenient` argument evaluates to True,
        checksum failures will raise warnings rather than exceptions.
        """
        self.preamble(lenient=lenient)
        raw = self.idatdecomp(lenient)
        if self.interlace:
            # Deinterlacing requires the whole image in memory.
            raw = bytearray(itertools.chain(*raw))
            arraycode = 'BH'[self.bitdepth > 8]
            # Like :meth:`group` but producing an array.array object for
            # each row.
            pixels = map(lambda *row: array(arraycode, row),
                         *[iter(self.deinterlace(raw))]*self.width*self.planes)
        else:
            # Straightlaced images can be streamed row by row.
            pixels = self.iterboxed(self.iterstraight(raw))
        meta = dict()
        for attr in 'greyscale alpha planes bitdepth interlace'.split():
            meta[attr] = getattr(self, attr)
        meta['size'] = (self.width, self.height)
        # Optional metadata is only included when the corresponding
        # chunk was present.
        for attr in ('gamma', 'transparent', 'background', 'last_mod_time',
                     'icc_profile', 'resolution', 'text',
                     'rendering_intent', 'white_point', 'rgb_points'):
            a = getattr(self, attr, None)
            if a is not None:
                meta[attr] = a
        if self.plte:
            meta['palette'] = self.palette()
        return self.width, self.height, pixels, meta
def read_flat(self):
"""
Read a PNG file and decode it into flat row flat pixel format.
Returns (*width*, *height*, *pixels*, *metadata*).
May use excessive memory.
`pixels` are returned in flat row flat pixel format.
See also the :meth:`read` method which returns pixels in the
more stream-friendly boxed row flat pixel format.
"""
x, y, pixel, meta = self.read()
arraycode = 'BH'[meta['bitdepth'] > 8]
pixel = array(arraycode, itertools.chain(*pixel))
return x, y, pixel, meta
    def palette(self, alpha='natural'):
        """
        Returns a palette that is a sequence of 3-tuples or 4-tuples
        Synthesizing it from the ``PLTE`` and ``tRNS`` chunks. These
        chunks should have already been processed (for example, by
        calling the :meth:`preamble` method). All the tuples are the
        same size: 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
        there is a ``tRNS`` chunk. Assumes that the image is colour type
        3 and therefore a ``PLTE`` chunk is required.
        If the `alpha` argument is ``'force'`` then an alpha channel is
        always added, forcing the result to be a sequence of 4-tuples.
        """
        if not self.plte:
            raise FormatError(
                "Required PLTE chunk is missing in colour type 3 image.")
        plte = group(bytearray(self.plte), 3)
        if self.trns or alpha == 'force':
            # tRNS may carry fewer alpha entries than there are palette
            # entries; pad the remainder to fully opaque (255).
            trns = bytearray(self.trns or strtobytes(''))
            trns.extend([255]*(len(plte)-len(trns)))
            plte = list(map(operator.add, plte, group(trns, 1)))
        return plte
    def asDirect(self):
        """Returns the image data as a direct representation of an
        ``x * y * planes`` array. This method is intended to remove the
        need for callers to deal with palettes and transparency
        themselves. Images with a palette (colour type 3)
        are converted to RGB or RGBA; images with transparency (a
        ``tRNS`` chunk) are converted to LA or RGBA as appropriate.
        When returned in this format the pixel values represent the
        colour value directly without needing to refer to palettes or
        transparency information.
        Like the :meth:`read` method this method returns a 4-tuple:
        (*width*, *height*, *pixels*, *meta*)
        This method normally returns pixel values with the bit depth
        they have in the source image, but when the source PNG has an
        ``sBIT`` chunk it is inspected and can reduce the bit depth of
        the result pixels; pixel values will be reduced according to
        the bit depth specified in the ``sBIT`` chunk (PNG nerds should
        note a single result bit depth is used for all channels; the
        maximum of the ones specified in the ``sBIT`` chunk. An RGB565
        image will be rescaled to 6-bit RGB666).
        The *meta* dictionary that is returned reflects the `direct`
        format and not the original source image. For example, an RGB
        source image with a ``tRNS`` chunk to represent a transparent
        colour, will have ``planes=3`` and ``alpha=False`` for the
        source image, but the *meta* dictionary returned by this method
        will have ``planes=4`` and ``alpha=True`` because an alpha
        channel is synthesized and added.
        *pixels* is the pixel data in boxed row flat pixel format (just
        like the :meth:`read` method).
        All the other aspects of the image data are not changed.
        """
        self.preamble()
        # Simple case, no conversion necessary.
        if not self.colormap and not self.trns and not self.sbit:
            return self.read()
        x, y, pixels, meta = self.read()
        if self.colormap:
            # Palette image: expand each index into its (A)RGB entry.
            meta['colormap'] = False
            meta['alpha'] = bool(self.trns)
            meta['bitdepth'] = 8
            meta['planes'] = 3 + bool(self.trns)
            plte = self.palette()
            def iterpal(pixels):
                for row in pixels:
                    row = [plte[i] for i in row]
                    yield bytearray(itertools.chain(*row))
            pixels = iterpal(pixels)
        elif self.trns:
            # It would be nice if there was some reasonable way
            # of doing this without generating a whole load of
            # intermediate tuples. But tuples does seem like the
            # easiest way, with no other way clearly much simpler or
            # much faster. (Actually, the L to LA conversion could
            # perhaps go faster (all those 1-tuples!), but I still
            # wonder whether the code proliferation is worth it)
            it = self.transparent
            maxval = 2**meta['bitdepth'] - 1
            planes = meta['planes']
            meta['alpha'] = True
            meta['planes'] += 1
            if meta['bitdepth'] > 8:
                def wrap_array(row):
                    return array('H', row)
            else:
                wrap_array = bytearray
            def itertrns(pixels):
                for row in pixels:
                    # For each row we group it into pixels, then form a
                    # characterisation vector that says whether each
                    # pixel is opaque or not. Then we convert
                    # True/False to 0/maxval (by multiplication),
                    # and add it as the extra channel.
                    row = group(row, planes)
                    opa = [maxval * (it != i) for i in row]
                    opa = zip(opa) # convert to 1-tuples
                    yield wrap_array(itertools.chain(*list(map(operator.add,
                                                               row, opa))))
            pixels = itertrns(pixels)
        # Optional bit-depth reduction driven by the sBIT chunk.
        targetbitdepth = None
        if self.sbit:
            sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
            targetbitdepth = max(sbit)
            if targetbitdepth > meta['bitdepth']:
                raise Error('sBIT chunk %r exceeds bitdepth %d' %
                            (sbit, self.bitdepth))
            if min(sbit) <= 0:
                raise Error('sBIT chunk %r has a 0-entry' % sbit)
            if targetbitdepth == meta['bitdepth']:
                # No reduction needed.
                targetbitdepth = None
        if targetbitdepth:
            shift = meta['bitdepth'] - targetbitdepth
            meta['bitdepth'] = targetbitdepth
            def itershift(pixels):
                for row in pixels:
                    yield array('BH'[targetbitdepth > 8],
                                [it >> shift for it in row])
            pixels = itershift(pixels)
        return x, y, pixels, meta
    def asFloat(self, maxval=1.0):
        """Return image pixels as per :meth:`asDirect` method, but scale
        all pixel values to be floating point values between 0.0 and
        *maxval*.
        The returned metadata has no ``'bitdepth'`` key (it no longer
        applies); a ``'maxval'`` key is added instead.
        """
        x, y, pixels, info = self.asDirect()
        sourcemaxval = 2**info['bitdepth'] - 1
        del info['bitdepth']
        info['maxval'] = float(maxval)
        factor = float(maxval) / float(sourcemaxval)
        def iterfloat():
            for row in pixels:
                yield [factor * it for it in row]
        return x, y, iterfloat(), info
    def _as_rescale(self, get, targetbitdepth):
        """Helper used by :meth:`asRGB8` and :meth:`asRGBA8`.
        `get` is the bound method that produces the source pixels
        (asRGB or asRGBA); values are linearly rescaled to
        `targetbitdepth` bits per sample.
        """
        width, height, pixels, meta = get()
        maxval = 2**meta['bitdepth'] - 1
        targetmaxval = 2**targetbitdepth - 1
        factor = float(targetmaxval) / float(maxval)
        meta['bitdepth'] = targetbitdepth
        def iterscale(rows):
            for row in rows:
                yield array('BH'[targetbitdepth > 8],
                            [int(round(x * factor)) for x in row])
        if maxval == targetmaxval:
            # Already at the target depth; pass the rows through.
            return width, height, pixels, meta
        else:
            # Rescale the 'transparent' metadata entry in step with the
            # pixel values so it remains comparable.
            if 'transparent' in meta:
                transparent = meta['transparent']
                if isinstance(transparent, tuple):
                    transparent = tuple(list(
                        iterscale((transparent,))
                    )[0])
                else:
                    transparent = tuple(list(
                        iterscale(((transparent,),))
                    )[0])[0]
                meta['transparent'] = transparent
            return width, height, iterscale(pixels), meta
    def asRGB8(self):
        """
        Return the image data as an RGB pixels with 8-bits per sample.
        This is like the :meth:`asRGB` method except that
        this method additionally rescales the values so that they
        are all between 0 and 255 (8-bit). In the case where the
        source image has a bit depth < 8 the transformation preserves
        all the information; where the source image has bit depth
        > 8, then rescaling to 8-bit values loses precision. No
        dithering is performed. Like :meth:`asRGB`, an alpha channel
        in the source image will raise an exception.
        This function returns a 4-tuple:
        (*width*, *height*, *pixels*, *metadata*).
        *width*, *height*, *metadata* are as per the
        :meth:`read` method.
        *pixels* is the pixel data in boxed row flat pixel format.
        """
        # Delegate to the shared rescaling helper with asRGB as source.
        return self._as_rescale(self.asRGB, 8)
    def asRGBA8(self):
        """
        Return the image data as RGBA pixels with 8-bits per sample.
        This method is similar to :meth:`asRGB8` and
        :meth:`asRGBA`: The result pixels have an alpha channel, *and*
        values are rescaled to the range 0 to 255. The alpha channel is
        synthesized if necessary (with a small speed penalty).
        """
        # Delegate to the shared rescaling helper with asRGBA as source.
        return self._as_rescale(self.asRGBA, 8)
    def asRGB(self):
        """
        Return image as RGB pixels.
        RGB colour images are passed through unchanged;
        greyscales are expanded into RGB triplets
        (there is a small speed overhead for doing this).
        An alpha channel in the source image will raise an exception.
        The return values are as for the :meth:`read` method
        except that the *metadata* reflect the returned pixels, not the
        source image. In particular, for this method
        ``metadata['greyscale']`` will be ``False``.
        """
        width, height, pixels, meta = self.asDirect()
        if meta['alpha']:
            raise Error("will not convert image with alpha channel to RGB")
        if not meta['greyscale']:
            # Already RGB; nothing to convert.
            return width, height, pixels, meta
        meta['greyscale'] = False
        # Choose the output array constructor by sample size.
        newarray = (newBarray, newHarray)[meta['bitdepth'] > 8]
        def iterrgb():
            for row in pixels:
                a = newarray(3 * width)
                # Replicate the single grey channel into R, G and B via
                # strided slice assignment.
                for i in range(3):
                    a[i::3] = row
                yield a
        return width, height, iterrgb(), meta
    def asRGBA(self):
        """
        Return image as RGBA pixels.
        Greyscales are expanded into RGB triplets;
        an alpha channel is synthesized if necessary.
        The return values are as for the :meth:`read` method
        except that the *metadata* reflect the returned pixels, not the
        source image. In particular, for this method
        ``metadata['greyscale']`` will be ``False``, and
        ``metadata['alpha']`` will be ``True``.
        """
        width, height, pixels, meta = self.asDirect()
        if meta['alpha'] and not meta['greyscale']:
            # Already RGBA; nothing to convert.
            return width, height, pixels, meta
        maxval = 2**meta['bitdepth'] - 1
        # Rows are pre-filled with maxval so the synthesized alpha
        # channel is fully opaque without extra work.
        if meta['bitdepth'] > 8:
            def newarray():
                return array('H', [maxval] * 4 * width)
        else:
            def newarray():
                return bytearray([maxval] * 4 * width)
        # Not best way, but we have only array of bytes accelerated now
        if meta['bitdepth'] <= 8:
            filt = BaseFilter()
        else:
            filt = iBaseFilter()
        if meta['alpha'] and meta['greyscale']:
            # LA to RGBA
            def convert():
                for row in pixels:
                    # Create a fresh target row, then copy L channel
                    # into first three target channels, and A channel
                    # into fourth channel.
                    a = newarray()
                    filt.convert_la_to_rgba(row, a)
                    yield a
        elif meta['greyscale']:
            # L to RGBA
            def convert():
                for row in pixels:
                    a = newarray()
                    filt.convert_l_to_rgba(row, a)
                    yield a
        else:
            assert not meta['alpha'] and not meta['greyscale']
            # RGB to RGBA
            def convert():
                for row in pixels:
                    a = newarray()
                    filt.convert_rgb_to_rgba(row, a)
                    yield a
        meta['alpha'] = True
        meta['greyscale'] = False
        return width, height, convert(), meta
def check_bitdepth_colortype(bitdepth, colortype):
    """
    Check that `bitdepth` and `colortype` are both valid,
    and specified in a valid combination. Returns (None) if valid,
    raise an Exception if not valid.
    """
    if bitdepth not in (1, 2, 4, 8, 16):
        raise FormatError("invalid bit depth %d" % bitdepth)
    if colortype not in (0, 2, 3, 4, 6):
        raise FormatError("invalid colour type %d" % colortype)
    # Check indexed (palettized) images have 8 or fewer bits
    # per pixel; check only indexed or greyscale images have
    # fewer than 8 bits per pixel.
    if colortype & 1 and bitdepth > 8:
        # Bug fix: the format arguments were previously supplied as
        # (bitdepth, colortype), so the message reported the bit depth
        # as the colour type and vice versa.
        raise FormatError(
            "Indexed images (colour type %d) cannot"
            " have bitdepth > 8 (bit depth %d)."
            " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
            % (colortype, bitdepth))
    if bitdepth < 8 and colortype not in (0, 3):
        raise FormatError("Illegal combination of bit depth (%d)"
            " and colour type (%d)."
            " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
            % (bitdepth, colortype))
def isinteger(x):
    """Check if `x` is platform native integer"""
    try:
        coerced = int(x)
    except (TypeError, ValueError):
        return False
    return coerced == x
# === Legacy Version Support ===
# In order to work on Python 2.3 we fix up a recurring annoyance involving
# the array type. In Python 2.3 an array cannot be initialised with an
# array, and it cannot be extended with a list (or other sequence).
# Both of those are repeated issues in the code. Whilst I would not
# normally tolerate this sort of behaviour, here we "shim" a replacement
# for array into place (and hope no-one notices). You never read this.
try:
    # Probe exactly the two operations Python 2.3's array lacks; on any
    # modern interpreter both succeed and no shim is installed.
    array('B').extend([])
    array('B', array('B'))
except TypeError:
    # Expect to get here on Python 2.3
    class _array_shim(array):
        # Keep a handle on the real array type so the shim can route
        # arbitrary sequences through it.
        true_array = array
        def __new__(cls, typecode, init=None):
            super_new = super(_array_shim, cls).__new__
            it = super_new(cls, typecode)
            if init is None:
                return it
            # Emulate array(typecode, init) via extend.
            it.extend(init)
            return it
        def extend(self, extension):
            super_extend = super(_array_shim, self).extend
            if isinstance(extension, self.true_array):
                return super_extend(extension)
            if not isinstance(extension, (list, str)):
                # Convert to list. Allows iterators to work.
                extension = list(extension)
            return super_extend(self.true_array(self.typecode, extension))
    array = _array_shim
# Original array initialisation is faster but multiplication change class
def newBarray(length=0):
    """Return a new zero-filled unsigned-byte ('B') array of `length`."""
    zeros = [0 for _ in range(length)]
    return array('B', zeros)
def newHarray(length=0):
    """Return a new zero-filled unsigned-short ('H') array of `length`."""
    zeros = [0 for _ in range(length)]
    return array('H', zeros)
| agpl-3.0 |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/latin1prober.py | 1778 | 5232 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
FREQ_CAT_NUM = 4
UDF = 0 # undefined
OTH = 1 # other
ASC = 2 # ascii capital letter
ASS = 3 # ascii small letter
ACV = 4 # accent capital vowel
ACO = 5 # accent capital other
ASV = 6 # accent small vowel
ASO = 7 # accent small other
CLASS_NUM = 8 # total classes
Latin1_CharToClass = (
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
)
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
# UDF OTH ASC ASS ACV ACO ASV ASO
0, 0, 0, 0, 0, 0, 0, 0, # UDF
0, 3, 3, 3, 3, 3, 3, 3, # OTH
0, 3, 3, 3, 3, 3, 3, 3, # ASC
0, 3, 3, 3, 1, 1, 3, 3, # ASS
0, 3, 3, 3, 1, 2, 1, 2, # ACV
0, 3, 3, 3, 3, 3, 3, 3, # ACO
0, 3, 1, 3, 1, 1, 1, 3, # ASV
0, 3, 1, 3, 1, 1, 3, 3, # ASO
)
class Latin1Prober(CharSetProber):
    """Prober for Latin-1 (windows-1252) text.

    Rates every adjacent pair of character classes against
    Latin1ClassModel; an illegal pair rules the charset out.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        self.reset()

    def reset(self):
        # Start from the neutral "other" class with all buckets empty.
        self._mLastCharClass = OTH
        self._mFreqCounter = [0] * FREQ_CAT_NUM
        CharSetProber.reset(self)

    def get_charset_name(self):
        return "windows-1252"

    def feed(self, aBuf):
        # Score each (previous, current) class transition.  A rating of
        # zero marks an impossible pair, so the charset cannot match.
        for byte in self.filter_with_english_letters(aBuf):
            cur_class = Latin1_CharToClass[wrap_ord(byte)]
            rating = Latin1ClassModel[self._mLastCharClass * CLASS_NUM
                                      + cur_class]
            if not rating:
                self._mState = eNotMe
                break
            self._mFreqCounter[rating] += 1
            self._mLastCharClass = cur_class
        return self.get_state()

    def get_confidence(self):
        if self.get_state() == eNotMe:
            return 0.01
        total = sum(self._mFreqCounter)
        if total < 0.01:
            confidence = 0.0
        else:
            # "Very likely" pairs raise confidence; "very unlikely" pairs
            # are heavily penalized.
            confidence = ((self._mFreqCounter[3]
                           - self._mFreqCounter[1] * 20.0) / total)
        if confidence < 0.0:
            confidence = 0.0
        # lower the confidence of latin1 so that other more accurate
        # detector can take priority.
        confidence = confidence * 0.73
        return confidence
| mit |
dimbeni/Gnucash | src/python/init.py | 1 | 2839 | import sys
import _sw_app_utils
from gnucash import *
from _sw_core_utils import gnc_core_prefs_is_extra_enabled
import gtk
import os
sys.path.append(os.path.dirname(__file__))
# Only be chatty when the user enabled "extra" (debug) output in preferences.
noisy = gnc_core_prefs_is_extra_enabled()
if noisy:
    print "woop", os.path.dirname(__file__)
    import pycons.console as cons
if noisy:
    print "Hello from python!"
    print "test", sys.modules.keys()
    print "test2", dir(_sw_app_utils)
# Root account of the currently open book, as a raw SWIG pointer.
root = _sw_app_utils.gnc_get_current_root_account()
if noisy:
    print "test", dir(root), root.__class__
    print "test2", dir(gnucash_core_c)
# Wrap the raw pointer in the python-level Account class.
acct = Account(instance = root)
if noisy:
    print "test3", dir(acct)
#print acct.GetName()
#print acct.GetBalance()
#print acct.GetSplitList()
#print "test2", dir(gnucash.gnucash_core_c)
class Console (cons.Console):
    """GTK python console embedded in GnuCash.

    Wraps pycons' Console widget, forwarding key/button/scroll events to
    an active canvas (when one is present) and redrawing any inline
    figures on click.
    """
    def __init__(self, argv=None, shelltype='python', banner=None,
                 filename=None, size=100):
        # Fix: mutable default arguments ([]) are shared across calls;
        # use None sentinels and create fresh lists instead.
        if argv is None:
            argv = []
        if banner is None:
            banner = []
        cons.Console.__init__(self, argv, shelltype, banner, filename, size)
        self.buffer.create_tag('center',
                               justification=gtk.JUSTIFY_CENTER,
                               font='Mono 4')
        self.figures = []        # (figure, canvas, anchor) triples
        self.callbacks = []
        self.last_figure = None
        self.active_canvas = None
        self.view.connect ('key-press-event', self.key_press_event)
        self.view.connect ('button-press-event', self.button_press_event)
        self.view.connect ('scroll-event', self.scroll_event)
    def key_press_event (self, widget, event):
        """ Handle key press event """
        # A focused canvas consumes keystrokes; otherwise fall back to
        # the normal console key handling.
        if self.active_canvas:
            self.active_canvas.emit ('key-press-event', event)
            return True
        return cons.Console.key_press_event (self, widget, event)
    def scroll_event (self, widget, event):
        """ Scroll event """
        # Swallow scrolling while a canvas is active.
        if self.active_canvas:
            return True
        return False
    def button_press_event (self, widget, event):
        """ Button press event """
        return self.refresh()
    def refresh (self):
        """ Redraw all inline figure canvases. """
        for fig in self.figures:
            figure, canvas, anchor = fig
            canvas.draw()
        return False
# Change this to "if True:" to switch on a python console at gnucash
# startup (dead code by default; requires the gtk/pycons imports above).
if False:
    console = Console(argv = [], shelltype = 'python', banner = [['woop', 'title']], size = 100)
    window = gtk.Window(gtk.WINDOW_TOPLEVEL)
    window.set_position(gtk.WIN_POS_CENTER)
    window.set_default_size(800,600)
    window.set_border_width(0)
    # Hm. gtk.main_quit will kill gnucash without closing the file
    # properly. That's kinda bad.
    window.connect('destroy-event', gtk.main_quit)
    window.connect('delete-event', gtk.main_quit)
    window.add (console)
    window.show_all()
    console.grab_focus()
| gpl-2.0 |
wdv4758h/ZipPy | lib-python/3/test/test_threading.py | 1 | 29804 | # Very rudimentary test of threading module
import test.support
from test.support import verbose, strip_python_stderr, import_module
import random
import re
import sys
_thread = import_module('_thread')
threading = import_module('threading')
import time
import unittest
import weakref
import os
from test.script_helper import assert_python_ok, assert_python_failure
import subprocess
from test import lock_tests
# A trivial mutable counter.
class Counter(object):
    """A trivial mutable counter: increment, decrement, and read."""

    def __init__(self):
        # Counts always start at zero.
        self.value = 0

    def inc(self):
        """Increase the count by one."""
        self.value = self.value + 1

    def dec(self):
        """Decrease the count by one."""
        self.value = self.value - 1

    def get(self):
        """Return the current count."""
        return self.value
class TestThread(threading.Thread):
    # Worker thread for ThreadTests.test_various_ops: sleeps a random tiny
    # amount while holding a semaphore slot, and uses the shared Counter to
    # verify that no more than 3 workers ever run at once.
    def __init__(self, name, testcase, sema, mutex, nrunning):
        threading.Thread.__init__(self, name=name)
        self.testcase = testcase
        self.sema = sema          # BoundedSemaphore limiting concurrency
        self.mutex = mutex        # guards the shared nrunning counter
        self.nrunning = nrunning  # Counter of currently-running workers
    def run(self):
        delay = random.random() / 10000.0
        if verbose:
            print('task %s will run for %.1f usec' %
                  (self.name, delay * 1e6))
        with self.sema:
            with self.mutex:
                self.nrunning.inc()
                if verbose:
                    print(self.nrunning.get(), 'tasks are running')
                # The semaphore was created with value=3, so at most 3
                # workers can hold it at a time.
                self.testcase.assertTrue(self.nrunning.get() <= 3)
            time.sleep(delay)
            if verbose:
                print('task', self.name, 'done')
            with self.mutex:
                self.nrunning.dec()
                self.testcase.assertTrue(self.nrunning.get() >= 0)
                if verbose:
                    print('%s is finished. %d tasks are running' %
                          (self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
    # Shared fixture: snapshot live-thread state before each test and
    # clean up stray threads/child processes afterwards.
    def setUp(self):
        self._threads = test.support.threading_setup()
    def tearDown(self):
        test.support.threading_cleanup(*self._threads)
        test.support.reap_children()
class ThreadTests(BaseTestCase):
    """Core tests of Thread objects and the threading module: lifecycle,
    idents, foreign/dummy threads, async exceptions, shutdown behavior,
    refcycles, and the legacy camelCase API."""
    # Create a bunch of threads, let each do some work, wait until all are
    # done.
    def test_various_ops(self):
        # This takes about n/3 seconds to run (about n/3 clumps of tasks,
        # times about 1 second per clump).
        NUMTASKS = 10
        # no more than 3 of the 10 can run at once
        sema = threading.BoundedSemaphore(value=3)
        mutex = threading.RLock()
        numrunning = Counter()
        threads = []
        for i in range(NUMTASKS):
            t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
            threads.append(t)
            self.assertEqual(t.ident, None)
            self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
            t.start()
        if verbose:
            print('waiting for all tasks to complete')
        for t in threads:
            t.join(NUMTASKS)
            self.assertTrue(not t.is_alive())
            self.assertNotEqual(t.ident, 0)
            self.assertFalse(t.ident is None)
            self.assertTrue(re.match('<TestThread\(.*, stopped -?\d+\)>',
                                     repr(t)))
        if verbose:
            print('all tasks done')
        self.assertEqual(numrunning.get(), 0)
    def test_ident_of_no_threading_threads(self):
        # The ident still must work for the main thread and dummy threads.
        self.assertFalse(threading.currentThread().ident is None)
        def f():
            ident.append(threading.currentThread().ident)
            done.set()
        done = threading.Event()
        ident = []
        _thread.start_new_thread(f, ())
        done.wait()
        self.assertFalse(ident[0] is None)
        # Kill the "immortal" _DummyThread
        del threading._active[ident[0]]
    # run with a small(ish) thread stack size (256kB)
    def test_various_ops_small_stack(self):
        if verbose:
            print('with 256kB thread stack size...')
        try:
            threading.stack_size(262144)
        except _thread.error:
            raise unittest.SkipTest(
                'platform does not support changing thread stack size')
        self.test_various_ops()
        threading.stack_size(0)
    # run with a large thread stack size (1MB)
    def test_various_ops_large_stack(self):
        if verbose:
            print('with 1MB thread stack size...')
        try:
            threading.stack_size(0x100000)
        except _thread.error:
            raise unittest.SkipTest(
                'platform does not support changing thread stack size')
        self.test_various_ops()
        threading.stack_size(0)
    def test_foreign_thread(self):
        # Check that a "foreign" thread can use the threading module.
        def f(mutex):
            # Calling current_thread() forces an entry for the foreign
            # thread to get made in the threading._active map.
            threading.current_thread()
            mutex.release()
        mutex = threading.Lock()
        mutex.acquire()
        tid = _thread.start_new_thread(f, (mutex,))
        # Wait for the thread to finish.
        mutex.acquire()
        self.assertIn(tid, threading._active)
        self.assertIsInstance(threading._active[tid], threading._DummyThread)
        del threading._active[tid]
    # PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
    # exposed at the Python level.  This test relies on ctypes to get at it.
    @test.support.cpython_only
    def test_PyThreadState_SetAsyncExc(self):
        ctypes = import_module("ctypes")
        set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
        class AsyncExc(Exception):
            pass
        exception = ctypes.py_object(AsyncExc)
        # First check it works when setting the exception from the same thread.
        tid = _thread.get_ident()
        try:
            result = set_async_exc(ctypes.c_long(tid), exception)
            # The exception is async, so we might have to keep the VM busy until
            # it notices.
            while True:
                pass
        except AsyncExc:
            pass
        else:
            # This code is unreachable but it reflects the intent. If we wanted
            # to be smarter the above loop wouldn't be infinite.
            self.fail("AsyncExc not raised")
        try:
            self.assertEqual(result, 1) # one thread state modified
        except UnboundLocalError:
            # The exception was raised too quickly for us to get the result.
            pass
        # `worker_started` is set by the thread when it's inside a try/except
        # block waiting to catch the asynchronously set AsyncExc exception.
        # `worker_saw_exception` is set by the thread upon catching that
        # exception.
        worker_started = threading.Event()
        worker_saw_exception = threading.Event()
        class Worker(threading.Thread):
            def run(self):
                self.id = _thread.get_ident()
                self.finished = False
                try:
                    while True:
                        worker_started.set()
                        time.sleep(0.1)
                except AsyncExc:
                    self.finished = True
                    worker_saw_exception.set()
        t = Worker()
        t.daemon = True # so if this fails, we don't hang Python at shutdown
        t.start()
        if verbose:
            print("    started worker thread")
        # Try a thread id that doesn't make sense.
        if verbose:
            print("    trying nonsensical thread id")
        result = set_async_exc(ctypes.c_long(-1), exception)
        self.assertEqual(result, 0)  # no thread states modified
        # Now raise an exception in the worker thread.
        if verbose:
            print("    waiting for worker thread to get started")
        ret = worker_started.wait()
        self.assertTrue(ret)
        if verbose:
            print("    verifying worker hasn't exited")
        self.assertTrue(not t.finished)
        if verbose:
            print("    attempting to raise asynch exception in worker")
        result = set_async_exc(ctypes.c_long(t.id), exception)
        self.assertEqual(result, 1) # one thread state modified
        if verbose:
            print("    waiting for worker to say it caught the exception")
        worker_saw_exception.wait(timeout=10)
        self.assertTrue(t.finished)
        if verbose:
            print("    all OK -- joining worker")
        if t.finished:
            t.join()
        # else the thread is still running, and we have no way to kill it
    def test_limbo_cleanup(self):
        # Issue 7481: Failure to start thread should cleanup the limbo map.
        def fail_new_thread(*args):
            raise threading.ThreadError()
        _start_new_thread = threading._start_new_thread
        threading._start_new_thread = fail_new_thread
        try:
            t = threading.Thread(target=lambda: None)
            self.assertRaises(threading.ThreadError, t.start)
            self.assertFalse(
                t in threading._limbo,
                "Failed to cleanup _limbo map on failure of Thread.start().")
        finally:
            threading._start_new_thread = _start_new_thread
    @test.support.cpython_only
    def test_finalize_runnning_thread(self):
        # Issue 1402: the PyGILState_Ensure / _Release functions may be called
        # very late on python exit: on deallocation of a running thread for
        # example.
        import_module("ctypes")
        rc, out, err = assert_python_failure("-c", """if 1:
            import ctypes, sys, time, _thread
            # This lock is used as a simple event variable.
            ready = _thread.allocate_lock()
            ready.acquire()
            # Module globals are cleared before __del__ is run
            # So we save the functions in class dict
            class C:
                ensure = ctypes.pythonapi.PyGILState_Ensure
                release = ctypes.pythonapi.PyGILState_Release
                def __del__(self):
                    state = self.ensure()
                    self.release(state)
            def waitingThread():
                x = C()
                ready.release()
                time.sleep(100)
            _thread.start_new_thread(waitingThread, ())
            ready.acquire()  # Be sure the other thread is waiting.
            sys.exit(42)
            """)
        self.assertEqual(rc, 42)
    def test_finalize_with_trace(self):
        # Issue1733757
        # Avoid a deadlock when sys.settrace steps into threading._shutdown
        assert_python_ok("-c", """if 1:
            import sys, threading
            # A deadlock-killer, to prevent the
            # testsuite to hang forever
            def killer():
                import os, time
                time.sleep(2)
                print('program blocked; aborting')
                os._exit(2)
            t = threading.Thread(target=killer)
            t.daemon = True
            t.start()
            # This is the trace function
            def func(frame, event, arg):
                threading.current_thread()
                return func
            sys.settrace(func)
            """)
    def test_join_nondaemon_on_shutdown(self):
        # Issue 1722344
        # Raising SystemExit skipped threading._shutdown
        rc, out, err = assert_python_ok("-c", """if 1:
            import threading
            from time import sleep
            def child():
                sleep(1)
                # As a non-daemon thread we SHOULD wake up and nothing
                # should be torn down yet
                print("Woke up, sleep function is:", sleep)
            threading.Thread(target=child).start()
            raise SystemExit
            """)
        self.assertEqual(out.strip(),
            b"Woke up, sleep function is: <built-in function sleep>")
        self.assertEqual(err, b"")
    def test_enumerate_after_join(self):
        # Try hard to trigger #1703448: a thread is still returned in
        # threading.enumerate() after it has been join()ed.
        enum = threading.enumerate
        newgil = hasattr(sys, 'getswitchinterval')
        if newgil:
            geti, seti = sys.getswitchinterval, sys.setswitchinterval
        else:
            geti, seti = sys.getcheckinterval, sys.setcheckinterval
        old_interval = geti()
        try:
            for i in range(1, 100):
                seti(i * 0.0002 if newgil else i // 5)
                t = threading.Thread(target=lambda: None)
                t.start()
                t.join()
                l = enum()
                self.assertNotIn(t, l,
                    "#1703448 triggered after %d trials: %s" % (i, l))
        finally:
            seti(old_interval)
    @test.support.cpython_only
    def test_no_refcycle_through_target(self):
        class RunSelfFunction(object):
            def __init__(self, should_raise):
                # The links in this refcycle from Thread back to self
                # should be cleaned up when the thread completes.
                self.should_raise = should_raise
                self.thread = threading.Thread(target=self._run,
                                               args=(self,),
                                               kwargs={'yet_another':self})
                self.thread.start()
            def _run(self, other_ref, yet_another):
                if self.should_raise:
                    raise SystemExit
        cyclic_object = RunSelfFunction(should_raise=False)
        weak_cyclic_object = weakref.ref(cyclic_object)
        cyclic_object.thread.join()
        del cyclic_object
        self.assertIsNone(weak_cyclic_object(),
                          msg=('%d references still around' %
                               sys.getrefcount(weak_cyclic_object())))
        raising_cyclic_object = RunSelfFunction(should_raise=True)
        weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
        raising_cyclic_object.thread.join()
        del raising_cyclic_object
        self.assertIsNone(weak_raising_cyclic_object(),
                          msg=('%d references still around' %
                               sys.getrefcount(weak_raising_cyclic_object())))
    def test_old_threading_api(self):
        # Just a quick sanity check to make sure the old method names are
        # still present
        t = threading.Thread()
        t.isDaemon()
        t.setDaemon(True)
        t.getName()
        t.setName("name")
        t.isAlive()
        e = threading.Event()
        e.isSet()
        threading.activeCount()
    def test_repr_daemon(self):
        t = threading.Thread()
        self.assertFalse('daemon' in repr(t))
        t.daemon = True
        self.assertTrue('daemon' in repr(t))
class ThreadJoinOnShutdown(BaseTestCase):
    """Tests of interpreter shutdown and fork() interaction with joined
    threads; subprocess scripts are used so each scenario gets a fresh
    interpreter."""
    # Between fork() and exec(), only async-safe functions are allowed (issues
    # #12316 and #11870), and fork() from a worker thread is known to trigger
    # problems with some operating systems (issue #3863): skip problematic tests
    # on platforms known to behave badly.
    platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
                         'os2emx')
    def _run_and_join(self, script):
        script = """if 1:
            import sys, os, time, threading
            # a thread, which waits for the main program to terminate
            def joiningfunc(mainthread):
                mainthread.join()
                print('end of thread')
                # stdout is fully buffered because not a tty, we have to flush
                # before exit.
                sys.stdout.flush()
        \n""" + script
        rc, out, err = assert_python_ok("-c", script)
        data = out.decode().replace('\r', '')
        self.assertEqual(data, "end of main\nend of thread\n")
    def test_1_join_on_shutdown(self):
        # The usual case: on exit, wait for a non-daemon thread
        script = """if 1:
            import os
            t = threading.Thread(target=joiningfunc,
                                 args=(threading.current_thread(),))
            t.start()
            time.sleep(0.1)
            print('end of main')
            """
        self._run_and_join(script)
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_2_join_in_forked_process(self):
        # Like the test above, but from a forked interpreter
        script = """if 1:
            childpid = os.fork()
            if childpid != 0:
                os.waitpid(childpid, 0)
                sys.exit(0)
            t = threading.Thread(target=joiningfunc,
                                 args=(threading.current_thread(),))
            t.start()
            print('end of main')
            """
        self._run_and_join(script)
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_3_join_in_forked_from_thread(self):
        # Like the test above, but fork() was called from a worker thread
        # In the forked process, the main Thread object must be marked as stopped.
        script = """if 1:
            main_thread = threading.current_thread()
            def worker():
                childpid = os.fork()
                if childpid != 0:
                    os.waitpid(childpid, 0)
                    sys.exit(0)
                t = threading.Thread(target=joiningfunc,
                                     args=(main_thread,))
                print('end of main')
                t.start()
                t.join() # Should not block: main_thread is already stopped
            w = threading.Thread(target=worker)
            w.start()
            """
        self._run_and_join(script)
    def assertScriptHasOutput(self, script, expected_output):
        rc, out, err = assert_python_ok("-c", script)
        data = out.decode().replace('\r', '')
        self.assertEqual(data, expected_output)
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_4_joining_across_fork_in_worker_thread(self):
        # There used to be a possible deadlock when forking from a child
        # thread.  See http://bugs.python.org/issue6643.
        # The script takes the following steps:
        # - The main thread in the parent process starts a new thread and then
        #   tries to join it.
        # - The join operation acquires the Lock inside the thread's _block
        #   Condition.  (See threading.py:Thread.join().)
        # - We stub out the acquire method on the condition to force it to wait
        #   until the child thread forks.  (See LOCK ACQUIRED HERE)
        # - The child thread forks.  (See LOCK HELD and WORKER THREAD FORKS
        #   HERE)
        # - The main thread of the parent process enters Condition.wait(),
        #   which releases the lock on the child thread.
        # - The child process returns.  Without the necessary fix, when the
        #   main thread of the child process (which used to be the child thread
        #   in the parent process) attempts to exit, it will try to acquire the
        #   lock in the Thread._block Condition object and hang, because the
        #   lock was held across the fork.
        script = """if 1:
            import os, time, threading
            finish_join = False
            start_fork = False
            def worker():
                # Wait until this thread's lock is acquired before forking to
                # create the deadlock.
                global finish_join
                while not start_fork:
                    time.sleep(0.01)
                # LOCK HELD: Main thread holds lock across this call.
                childpid = os.fork()
                finish_join = True
                if childpid != 0:
                    # Parent process just waits for child.
                    os.waitpid(childpid, 0)
                # Child process should just return.
            w = threading.Thread(target=worker)
            # Stub out the private condition variable's lock acquire method.
            # This acquires the lock and then waits until the child has forked
            # before returning, which will release the lock soon after.  If
            # someone else tries to fix this test case by acquiring this lock
            # before forking instead of resetting it, the test case will
            # deadlock when it shouldn't.
            condition = w._block
            orig_acquire = condition.acquire
            call_count_lock = threading.Lock()
            call_count = 0
            def my_acquire():
                global call_count
                global start_fork
                orig_acquire()  # LOCK ACQUIRED HERE
                start_fork = True
                if call_count == 0:
                    while not finish_join:
                        time.sleep(0.01)  # WORKER THREAD FORKS HERE
                with call_count_lock:
                    call_count += 1
            condition.acquire = my_acquire
            w.start()
            w.join()
            print('end of main')
            """
        self.assertScriptHasOutput(script, "end of main\n")
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_5_clear_waiter_locks_to_avoid_crash(self):
        # Check that a spawned thread that forks doesn't segfault on certain
        # platforms, namely OS X.  This used to happen if there was a waiter
        # lock in the thread's condition variable's waiters list.  Even though
        # we know the lock will be held across the fork, it is not safe to
        # release locks held across forks on all platforms, so releasing the
        # waiter lock caused a segfault on OS X.  Furthermore, since locks on
        # OS X are (as of this writing) implemented with a mutex + condition
        # variable instead of a semaphore, while we know that the Python-level
        # lock will be acquired, we can't know if the internal mutex will be
        # acquired at the time of the fork.
        script = """if True:
            import os, time, threading
            start_fork = False
            def worker():
                # Wait until the main thread has attempted to join this thread
                # before continuing.
                while not start_fork:
                    time.sleep(0.01)
                childpid = os.fork()
                if childpid != 0:
                    # Parent process just waits for child.
                    (cpid, rc) = os.waitpid(childpid, 0)
                    assert cpid == childpid
                    assert rc == 0
                    print('end of worker thread')
                else:
                    # Child process should just return.
                    pass
            w = threading.Thread(target=worker)
            # Stub out the private condition variable's _release_save method.
            # This releases the condition's lock and flips the global that
            # causes the worker to fork.  At this point, the problematic waiter
            # lock has been acquired once by the waiter and has been put onto
            # the waiters list.
            condition = w._block
            orig_release_save = condition._release_save
            def my_release_save():
                global start_fork
                orig_release_save()
                # Waiter lock held here, condition lock released.
                start_fork = True
            condition._release_save = my_release_save
            w.start()
            w.join()
            print('end of main thread')
            """
        output = "end of worker thread\nend of main thread\n"
        self.assertScriptHasOutput(script, output)
    def test_6_daemon_threads(self):
        # Check that a daemon thread cannot crash the interpreter on shutdown
        # by manipulating internal structures that are being disposed of in
        # the main thread.
        script = """if True:
            import os
            import random
            import sys
            import time
            import threading
            thread_has_run = set()
            def random_io():
                '''Loop for a while sleeping random tiny amounts and doing some I/O.'''
                while True:
                    in_f = open(os.__file__, 'rb')
                    stuff = in_f.read(200)
                    null_f = open(os.devnull, 'wb')
                    null_f.write(stuff)
                    time.sleep(random.random() / 1995)
                    null_f.close()
                    in_f.close()
                    thread_has_run.add(threading.current_thread())
            def main():
                count = 0
                for _ in range(40):
                    new_thread = threading.Thread(target=random_io)
                    new_thread.daemon = True
                    new_thread.start()
                    count += 1
                while len(thread_has_run) < count:
                    time.sleep(0.001)
                # Trigger process shutdown
                sys.exit(0)
            main()
            """
        rc, out, err = assert_python_ok('-c', script)
        self.assertFalse(err)
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_reinit_tls_after_fork(self):
        # Issue #13817: fork() would deadlock in a multithreaded program with
        # the ad-hoc TLS implementation.
        def do_fork_and_wait():
            # just fork a child process and wait it
            pid = os.fork()
            if pid > 0:
                os.waitpid(pid, 0)
            else:
                os._exit(0)
        # start a bunch of threads that will fork() child processes
        threads = []
        for i in range(16):
            t = threading.Thread(target=do_fork_and_wait)
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
class ThreadingExceptionTests(BaseTestCase):
    """Tests of the error cases Thread must reject with RuntimeError, plus
    recursion-overflow safety inside a non-main thread."""
    # A RuntimeError should be raised if Thread.start() is called
    # multiple times.
    def test_start_thread_again(self):
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, thread.start)
    def test_joining_current_thread(self):
        current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join);
    def test_joining_inactive_thread(self):
        # A thread that was never started cannot be joined.
        thread = threading.Thread()
        self.assertRaises(RuntimeError, thread.join)
    def test_daemonize_active_thread(self):
        # daemon may only be set before start().
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
    @unittest.skipUnless(sys.platform == 'darwin', 'test macosx problem')
    def test_recursion_limit(self):
        # Issue 9670
        # test that excessive recursion within a non-main thread causes
        # an exception rather than crashing the interpreter on platforms
        # like Mac OS X or FreeBSD which have small default stack sizes
        # for threads
        script = """if True:
            import threading
            def recurse():
                return recurse()
            def outer():
                try:
                    recurse()
                except RuntimeError:
                    pass
            w = threading.Thread(target=outer)
            w.start()
            w.join()
            print('end of main thread')
            """
        expected_output = "end of main thread\n"
        p = subprocess.Popen([sys.executable, "-c", script],
                             stdout=subprocess.PIPE)
        stdout, stderr = p.communicate()
        data = stdout.decode().replace('\r', '')
        self.assertEqual(p.returncode, 0, "Unexpected error")
        self.assertEqual(data, expected_output)
# Concrete instantiations of the generic synchronization-primitive test
# mixins from test.lock_tests, each bound to a threading implementation.
class LockTests(lock_tests.LockTests):
    locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
    # Pure-Python RLock implementation.
    locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
    # C-accelerated RLock implementation, when available.
    locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
    eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
    # An Condition uses an RLock by default and exports its API.
    locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
    condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
    semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
    semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
    barriertype = staticmethod(threading.Barrier)
def test_main():
    # Run every test class defined in this module in a single pass.
    test.support.run_unittest(LockTests, PyRLockTests, CRLockTests, EventTests,
                              ConditionAsRLockTests, ConditionTests,
                              SemaphoreTests, BoundedSemaphoreTests,
                              ThreadTests,
                              ThreadJoinOnShutdown,
                              ThreadingExceptionTests,
                              BarrierTests
                              )
if __name__ == "__main__":
    test_main()
| bsd-3-clause |
zestrada/nova-cs498cc | nova/openstack/common/db/sqlalchemy/models.py | 8 | 3663 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models.
"""
from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper
from nova.openstack.common.db.sqlalchemy.session import get_session
from nova.openstack.common import timeutils
class ModelBase(object):
    """Base class for models.

    Provides save(), dict-style item access/iteration, and automatic
    created_at/updated_at timestamps for SQLAlchemy models.
    """
    __table_initialized__ = False
    # Timestamps maintained by SQLAlchemy on insert / update.
    created_at = Column(DateTime, default=timeutils.utcnow)
    updated_at = Column(DateTime, onupdate=timeutils.utcnow)
    metadata = None
    def save(self, session=None):
        """Save this object."""
        if not session:
            session = get_session()
        # NOTE(boris-42): This part of code should be look like:
        #                       sesssion.add(self)
        #                       session.flush()
        #                 But there is a bug in sqlalchemy and eventlet that
        #                 raises NoneType exception if there is no running
        #                 transaction and rollback is called. As long as
        #                 sqlalchemy has this bug we have to create transaction
        #                 explicity.
        with session.begin(subtransactions=True):
            session.add(self)
            session.flush()
    def __setitem__(self, key, value):
        setattr(self, key, value)
    def __getitem__(self, key):
        return getattr(self, key)
    def get(self, key, default=None):
        return getattr(self, key, default)
    def __iter__(self):
        # Python 2 code: dict.keys() returns a list here, so extend() works.
        columns = dict(object_mapper(self).columns).keys()
        # NOTE(russellb): Allow models to specify other keys that can be looked
        # up, beyond the actual db columns.  An example would be the 'name'
        # property for an Instance.
        if hasattr(self, '_extra_keys'):
            columns.extend(self._extra_keys())
        self._i = iter(columns)
        return self
    def next(self):
        # Python 2 iterator protocol: yields (column_name, value) pairs.
        n = self._i.next()
        return n, getattr(self, n)
    def update(self, values):
        """Make the model object behave like a dict."""
        for k, v in values.iteritems():
            setattr(self, k, v)
    def iteritems(self):
        """Make the model object behave like a dict.

        Includes attributes from joins."""
        local = dict(self)
        # Merge in eagerly-loaded (joined) attributes, skipping privates.
        joined = dict([(k, v) for k, v in self.__dict__.iteritems()
                      if not k[0] == '_'])
        local.update(joined)
        return local.iteritems()
class SoftDeleteMixin(object):
    # Mixin adding soft-delete bookkeeping columns.
    deleted_at = Column(DateTime)
    # 0 while the row is live; set to the row's own id on soft delete.
    deleted = Column(Integer, default=0)
    def soft_delete(self, session=None):
        """Mark this object as deleted."""
        self.deleted = self.id
        self.deleted_at = timeutils.utcnow()
        self.save(session=session)
| apache-2.0 |
patcon/open-cabinet | venv/lib/python2.7/site-packages/setuptools/package_index.py | 301 | 38760 | """PyPI and direct package downloading"""
import sys
import os
import re
import shutil
import socket
import base64
import hashlib
from functools import wraps
from pkg_resources import (
CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
require, Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST,
)
from setuptools import ssl_support
from distutils import log
from distutils.errors import DistutilsError
from setuptools.compat import (urllib2, httplib, StringIO, HTTPError,
urlparse, urlunparse, unquote, splituser,
url2pathname, name2codepoint,
unichr, urljoin, urlsplit, urlunsplit,
ConfigParser)
from setuptools.compat import filterfalse
from fnmatch import translate
from setuptools.py26compat import strip_fragment
from setuptools.py27compat import get_all_headers
# Matches the "#egg=ProjectName[-version]" fragment of a download URL.
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$')
# Extracts href targets from HTML links (case-insensitive).
HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I)
# this is here to fix emacs' cruddy broken syntax highlighting
# Matches a PyPI listing row: "<a>name</a> (<a ...>md5</a>)" with digest.
PYPI_MD5 = re.compile(
    '<a href="([^"#]+)">([^<]+)</a>\n\s+\\(<a (?:title="MD5 hash"\n\s+)'
    'href="[^?]+\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\\)'
)
# Predicate: does a string start with a "scheme:" URL prefix?
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match
# Recognized source-distribution suffixes, longest first.
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
__all__ = [
    'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
    'interpret_distro_name',
]
# Timeout for network operations; presumably seconds — confirm at use site.
_SOCKET_TIMEOUT = 15
def parse_bdist_wininst(name):
    """Parse a possible bdist_wininst installer filename.

    Return a ``(base, py_version, platform)`` triple.  Parts that cannot
    be determined are ``None``; all three are ``None`` if *name* is not a
    ``.exe`` installer name at all.  (The old docstring wrongly claimed a
    2-tuple ``(base, pyversion)`` was returned.)
    """
    lower = name.lower()
    base, py_ver, plat = None, None, None

    if lower.endswith('.exe'):
        if lower.endswith('.win32.exe'):
            # e.g. "foo-1.0.win32.exe": win32 installer for any Python
            base = name[:-10]
            plat = 'win32'
        elif lower.startswith('.win32-py', -16):
            # e.g. "foo-1.0.win32-py2.7.exe": version-specific installer
            py_ver = name[-7:-4]
            base = name[:-16]
            plat = 'win32'
        elif lower.endswith('.win-amd64.exe'):
            base = name[:-14]
            plat = 'win-amd64'
        elif lower.startswith('.win-amd64-py', -20):
            py_ver = name[-7:-4]
            base = name[:-20]
            plat = 'win-amd64'
    return base, py_ver, plat
def egg_info_for_url(url):
    """Split a download URL into its (basename, fragment) parts."""
    scheme, server, path, parameters, query, fragment = urlparse(url)
    segments = path.split('/')
    base = unquote(segments[-1])
    if server == 'sourceforge.net' and base == 'download':  # XXX Yuck
        # SourceForge mirror URLs end in ".../<real-name>/download".
        base = unquote(segments[-2])
    if '#' in base:
        # A quoted '#' in the filename re-introduces a fragment.
        base, fragment = base.split('#', 1)
    return base, fragment
def distros_for_url(url, metadata=None):
    """Yield egg or source distribution objects that might be found at a URL"""
    base, fragment = egg_info_for_url(url)
    for dist in distros_for_location(url, base, metadata):
        yield dist
    # An "#egg=Name" fragment names an additional checkout candidate.
    if not fragment:
        return
    match = EGG_FRAGMENT.match(fragment)
    if not match:
        return
    for dist in interpret_distro_name(url, match.group(1), metadata,
                                      precedence=CHECKOUT_DIST):
        yield dist
def distros_for_location(location, basename, metadata=None):
    """Yield egg or source distribution objects based on basename"""
    name = basename
    if name.endswith('.egg.zip'):
        # a zipped egg: drop the ".zip" and treat what remains as an egg
        name = name[:-4]
    if name.endswith('.egg') and '-' in name:
        # an egg filename encodes name/version unambiguously
        return [Distribution.from_location(location, name, metadata)]
    if name.endswith('.exe'):
        win_base, py_ver, platform = parse_bdist_wininst(name)
        if win_base is not None:
            return interpret_distro_name(
                location, win_base, metadata, py_ver, BINARY_DIST, platform
            )
    # try the known source-distribution suffixes (.zip, .tgz, etc.)
    for ext in EXTENSIONS:
        if name.endswith(ext):
            return interpret_distro_name(location, name[:-len(ext)], metadata)
    return []  # no extension matched
def distros_for_filename(filename, metadata=None):
    """Yield possible egg or source distribution objects based on a filename"""
    location = normalize_path(filename)
    basename = os.path.basename(filename)
    return distros_for_location(location, basename, metadata)
def interpret_distro_name(
        location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
        platform=None
        ):
    """Generate alternative interpretations of a source distro name

    Note: if `location` is a filesystem filename, you should call
    ``pkg_resources.normalize_path()`` on it before passing it to this
    routine!

    Names like "adns-python-1.1.0" are ambiguous as to where the project
    name ends and the version begins, so every possible split is yielded;
    spurious interpretations compare lower than any real version and are
    effectively harmless in practice.
    """
    pieces = basename.split('-')
    if not py_version and any(re.match('py\d\.\d$', p) for p in pieces[2:]):
        # a "pyM.N" component means this is a bdist_dumb, not an sdist -- bail
        return
    for cut in range(1, len(pieces) + 1):
        yield Distribution(
            location, metadata,
            '-'.join(pieces[:cut]), '-'.join(pieces[cut:]),
            py_version=py_version, precedence=precedence,
            platform=platform
        )
# Adapted from the itertools recipes in the Python 2.7 docs
def unique_everseen(iterable, key=None):
    "List unique elements, preserving order. Remember all elements ever seen."
    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
    # unique_everseen('ABBCcAD', str.lower) --> A B C D
    seen = set()
    remember = seen.add
    for element in iterable:
        signature = element if key is None else key(element)
        if signature not in seen:
            remember(signature)
            yield element
def unique_values(func):
    """
    Wrap a function returning an iterable such that the resulting iterable
    only ever yields unique items.
    """
    @wraps(func)
    def deduplicated(*args, **kwargs):
        return unique_everseen(func(*args, **kwargs))
    return deduplicated
# Matches a whole tag that carries a rel="..." attribute; group 1 is the
# tag contents, group 2 the rel value.
REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
# this line is here to fix emacs' cruddy broken syntax highlighting
@unique_values
def find_external_links(url, page):
    """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
    for rel_match in REL.finditer(page):
        tag, rel = rel_match.groups()
        rel_values = set(map(str.strip, rel.lower().split(',')))
        if 'homepage' in rel_values or 'download' in rel_values:
            for href_match in HREF.finditer(tag):
                yield urljoin(url, htmldecode(href_match.group(1)))
    # legacy PyPI table layout: the link follows a <th> label cell
    for marker in ("<th>Home Page", "<th>Download URL"):
        pos = page.find(marker)
        if pos!=-1:
            href_match = HREF.search(page,pos)
            if href_match:
                yield urljoin(url, htmldecode(href_match.group(1)))
# User-Agent header sent with index requests; reports the Python and
# setuptools versions so servers can identify the client.
user_agent = "Python-urllib/%s setuptools/%s" % (
    sys.version[:3], require('setuptools')[0].version
)
class ContentChecker(object):
    """
    A null content checker that defines the interface for checking content
    """
    def feed(self, block):
        """Absorb a block of downloaded data (a no-op here)."""
        return
    def is_valid(self):
        """Report whether the content seen so far validates; always True."""
        return True
    def report(self, reporter, template):
        """Describe this checker via ``reporter``; nothing to report here."""
        return
class HashChecker(ContentChecker):
    """Validates downloaded content against a hash named in a URL fragment."""
    # fragment syntax: "<algorithm>=<lowercase hexdigest>"
    pattern = re.compile(
        r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
        r'(?P<expected>[a-f0-9]+)'
    )
    def __init__(self, hash_name, expected):
        self.hash_name = hash_name
        self.hash = hashlib.new(hash_name)
        self.expected = expected
    @classmethod
    def from_url(cls, url):
        "Construct a (possibly null) ContentChecker from a URL"
        fragment = urlparse(url)[-1]
        if not fragment:
            return ContentChecker()
        match = cls.pattern.search(fragment)
        if match is None:
            return ContentChecker()
        return cls(**match.groupdict())
    def feed(self, block):
        self.hash.update(block)
    def is_valid(self):
        return self.hash.hexdigest() == self.expected
    def report(self, reporter, template):
        return reporter(template % self.hash_name)
class PackageIndex(Environment):
    """A distribution index that scans web pages for download URLs"""
    def __init__(
            self, index_url="https://pypi.python.org/simple", hosts=('*',),
            ca_bundle=None, verify_ssl=True, *args, **kw
            ):
        Environment.__init__(self,*args,**kw)
        # Normalize the index URL so it always ends with a slash.
        self.index_url = index_url + "/"[:not index_url.endswith('/')]
        # url -> True maps used as sets, recording which URLs we have
        # already examined / fetched.
        self.scanned_urls = {}
        self.fetched_urls = {}
        # project-key -> {page-url: True}, filled in by process_index()
        self.package_pages = {}
        # predicate: is this host allowed?  (fnmatch patterns from `hosts`)
        self.allows = re.compile('|'.join(map(translate,hosts))).match
        # URLs deferred until prescan(); set to None once we "go online"
        self.to_scan = []
        if verify_ssl and ssl_support.is_available and (ca_bundle or ssl_support.find_ca_bundle()):
            self.opener = ssl_support.opener_for(ca_bundle)
        else: self.opener = urllib2.urlopen
    def process_url(self, url, retrieve=False):
        """Evaluate a URL as a possible download, and maybe retrieve it"""
        if url in self.scanned_urls and not retrieve:
            return
        self.scanned_urls[url] = True
        if not URL_SCHEME(url):
            # no scheme: treat it as a local filename/directory
            self.process_filename(url)
            return
        else:
            dists = list(distros_for_url(url))
            if dists:
                if not self.url_ok(url):
                    return
                self.debug("Found link: %s", url)
        if dists or not retrieve or url in self.fetched_urls:
            list(map(self.add, dists))
            return  # don't need the actual page
        if not self.url_ok(url):
            self.fetched_urls[url] = True
            return
        self.info("Reading %s", url)
        self.fetched_urls[url] = True  # prevent multiple fetch attempts
        f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url)
        if f is None: return
        self.fetched_urls[f.url] = True
        if 'html' not in f.headers.get('content-type', '').lower():
            f.close()  # not html, we can't process it
            return
        base = f.url  # handle redirects
        page = f.read()
        if not isinstance(page, str):  # We are in Python 3 and got bytes. We want str.
            if isinstance(f, HTTPError):
                # Errors have no charset, assume latin1:
                charset = 'latin-1'
            else:
                charset = f.headers.get_param('charset') or 'latin-1'
            page = page.decode(charset, "ignore")
        f.close()
        # recursively follow every link on the page
        for match in HREF.finditer(page):
            link = urljoin(base, htmldecode(match.group(1)))
            self.process_url(link)
        if url.startswith(self.index_url) and getattr(f,'code',None)!=404:
            page = self.process_index(url, page)
    def process_filename(self, fn, nested=False):
        # process filenames or directories
        if not os.path.exists(fn):
            self.warn("Not found: %s", fn)
            return
        if os.path.isdir(fn) and not nested:
            # scan one directory level only (nested=True stops recursion)
            path = os.path.realpath(fn)
            for item in os.listdir(path):
                self.process_filename(os.path.join(path,item), True)
        dists = distros_for_filename(fn)
        if dists:
            self.debug("Found: %s", fn)
            list(map(self.add, dists))
    def url_ok(self, url, fatal=False):
        # local file URLs are always OK; remote hosts must match `allows`
        s = URL_SCHEME(url)
        if (s and s.group(1).lower()=='file') or self.allows(urlparse(url)[1]):
            return True
        msg = ("\nNote: Bypassing %s (disallowed host; see "
               "http://bit.ly/1dg9ijs for details).\n")
        if fatal:
            raise DistutilsError(msg % url)
        else:
            self.warn(msg, url)
    def scan_egg_links(self, search_path):
        # look for .egg-link files (develop-installed projects) on the path
        for item in search_path:
            if os.path.isdir(item):
                for entry in os.listdir(item):
                    if entry.endswith('.egg-link'):
                        self.scan_egg_link(item, entry)
    def scan_egg_link(self, path, entry):
        # an .egg-link file holds two non-blank lines: project dir and subdir
        lines = [_f for _f in map(str.strip,
                                  open(os.path.join(path, entry))) if _f]
        if len(lines)==2:
            for dist in find_distributions(os.path.join(path, lines[0])):
                dist.location = os.path.join(path, *lines)
                dist.precedence = SOURCE_DIST
                self.add(dist)
    def process_index(self,url,page):
        """Process the contents of a PyPI page"""
        def scan(link):
            # Process a URL to see if it's for a package page
            if link.startswith(self.index_url):
                parts = list(map(
                    unquote, link[len(self.index_url):].split('/')
                ))
                if len(parts)==2 and '#' not in parts[1]:
                    # it's a package page, sanitize and index it
                    pkg = safe_name(parts[0])
                    ver = safe_version(parts[1])
                    self.package_pages.setdefault(pkg.lower(),{})[link] = True
                    return to_filename(pkg), to_filename(ver)
            return None, None
        # process an index page into the package-page index
        for match in HREF.finditer(page):
            try:
                scan(urljoin(url, htmldecode(match.group(1))))
            except ValueError:
                pass
        pkg, ver = scan(url)  # ensure this page is in the page index
        if pkg:
            # process individual package page
            for new_url in find_external_links(url, page):
                # Process the found URL
                base, frag = egg_info_for_url(new_url)
                if base.endswith('.py') and not frag:
                    if ver:
                        new_url+='#egg=%s-%s' % (pkg,ver)
                    else:
                        self.need_version_info(url)
                self.scan_url(new_url)
            # rewrite legacy md5 links so the digest rides in the fragment
            return PYPI_MD5.sub(
                lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1,3,2), page
            )
        else:
            return ""  # no sense double-scanning non-package pages
    def need_version_info(self, url):
        # a bare .py link without a version forces a full index scan
        self.scan_all(
            "Page at %s links to .py file(s) without version info; an index "
            "scan is required.", url
        )
    def scan_all(self, msg=None, *args):
        # scan the root index page (once) after optionally warning why
        if self.index_url not in self.fetched_urls:
            if msg: self.warn(msg,*args)
            self.info(
                "Scanning index of all packages (this may take a while)"
            )
        self.scan_url(self.index_url)
    def find_packages(self, requirement):
        # scan the project's index page(s), trying unsafe then safe name
        self.scan_url(self.index_url + requirement.unsafe_name+'/')
        if not self.package_pages.get(requirement.key):
            # Fall back to safe version of the name
            self.scan_url(self.index_url + requirement.project_name+'/')
        if not self.package_pages.get(requirement.key):
            # We couldn't find the target package, so search the index page too
            self.not_found_in_index(requirement)
        for url in list(self.package_pages.get(requirement.key,())):
            # scan each page that might be related to the desired package
            self.scan_url(url)
    def obtain(self, requirement, installer=None):
        # Environment hook: look online before delegating to the installer
        self.prescan()
        self.find_packages(requirement)
        for dist in self[requirement.key]:
            if dist in requirement:
                return dist
            self.debug("%s does not match %s", requirement, dist)
        return super(PackageIndex, self).obtain(requirement,installer)
    def check_hash(self, checker, filename, tfp):
        """
        checker is a ContentChecker
        """
        checker.report(self.debug,
            "Validating %%s checksum for %s" % filename)
        if not checker.is_valid():
            # remove the corrupt download before raising
            tfp.close()
            os.unlink(filename)
            raise DistutilsError(
                "%s validation failed for %s; "
                "possible download problem?" % (
                    checker.hash.name, os.path.basename(filename))
            )
    def add_find_links(self, urls):
        """Add `urls` to the list that will be prescanned for searches"""
        for url in urls:
            if (
                self.to_scan is None  # if we have already "gone online"
                or not URL_SCHEME(url)  # or it's a local file/directory
                or url.startswith('file:')
                or list(distros_for_url(url))  # or a direct package link
            ):
                # then go ahead and process it now
                self.scan_url(url)
            else:
                # otherwise, defer retrieval till later
                self.to_scan.append(url)
    def prescan(self):
        """Scan urls scheduled for prescanning (e.g. --find-links)"""
        if self.to_scan:
            list(map(self.scan_url, self.to_scan))
        self.to_scan = None  # from now on, go ahead and process immediately
    def not_found_in_index(self, requirement):
        # choose log level/message based on whether we've seen any distro
        if self[requirement.key]:  # we've seen at least one distro
            meth, msg = self.info, "Couldn't retrieve index page for %r"
        else:  # no distros seen for this name, might be misspelled
            meth, msg = (self.warn,
                "Couldn't find index page for %r (maybe misspelled?)")
        meth(msg, requirement.unsafe_name)
        self.scan_all()
    def download(self, spec, tmpdir):
        """Locate and/or download `spec` to `tmpdir`, returning a local path

        `spec` may be a ``Requirement`` object, or a string containing a URL,
        an existing local filename, or a project/version requirement spec
        (i.e. the string form of a ``Requirement`` object).  If it is the URL
        of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
        that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
        automatically created alongside the downloaded file.

        If `spec` is a ``Requirement`` object or a string containing a
        project/version requirement spec, this method returns the location of
        a matching distribution (possibly after downloading it to `tmpdir`).
        If `spec` is a locally existing file or directory name, it is simply
        returned unchanged.  If `spec` is a URL, it is downloaded to a subpath
        of `tmpdir`, and the local filename is returned.  Various errors may be
        raised if a problem occurs during downloading.
        """
        if not isinstance(spec,Requirement):
            scheme = URL_SCHEME(spec)
            if scheme:
                # It's a url, download it to tmpdir
                found = self._download_url(scheme.group(1), spec, tmpdir)
                base, fragment = egg_info_for_url(spec)
                if base.endswith('.py'):
                    found = self.gen_setup(found,fragment,tmpdir)
                return found
            elif os.path.exists(spec):
                # Existing file or directory, just return it
                return spec
            else:
                try:
                    spec = Requirement.parse(spec)
                except ValueError:
                    raise DistutilsError(
                        "Not a URL, existing file, or requirement spec: %r" %
                        (spec,)
                    )
        return getattr(self.fetch_distribution(spec, tmpdir),'location',None)
    def fetch_distribution(
            self, requirement, tmpdir, force_scan=False, source=False,
            develop_ok=False, local_index=None
            ):
        """Obtain a distribution suitable for fulfilling `requirement`

        `requirement` must be a ``pkg_resources.Requirement`` instance.
        If necessary, or if the `force_scan` flag is set, the requirement is
        searched for in the (online) package index as well as the locally
        installed packages.  If a distribution matching `requirement` is found,
        the returned distribution's ``location`` is the value you would have
        gotten from calling the ``download()`` method with the matching
        distribution's URL or filename.  If no matching distribution is found,
        ``None`` is returned.

        If the `source` flag is set, only source distributions and source
        checkout links will be considered.  Unless the `develop_ok` flag is
        set, development and system eggs (i.e., those using the ``.egg-info``
        format) will be ignored.
        """
        # process a Requirement
        self.info("Searching for %s", requirement)
        skipped = {}
        dist = None
        def find(req, env=None):
            if env is None:
                env = self
            # Find a matching distribution; may be called more than once
            for dist in env[req.key]:
                if dist.precedence==DEVELOP_DIST and not develop_ok:
                    if dist not in skipped:
                        self.warn("Skipping development or system egg: %s",dist)
                        skipped[dist] = 1
                    continue
                if dist in req and (dist.precedence<=SOURCE_DIST or not source):
                    return dist
        # search order: (optional forced scan) -> local index -> prescan
        # of --find-links -> per-project index pages
        if force_scan:
            self.prescan()
            self.find_packages(requirement)
            dist = find(requirement)
        if local_index is not None:
            dist = dist or find(requirement, local_index)
        if dist is None:
            if self.to_scan is not None:
                self.prescan()
            dist = find(requirement)
        if dist is None and not force_scan:
            self.find_packages(requirement)
            dist = find(requirement)
        if dist is None:
            self.warn(
                "No local packages or download links found for %s%s",
                (source and "a source distribution of " or ""),
                requirement,
            )
        else:
            self.info("Best match: %s", dist)
            return dist.clone(location=self.download(dist.location, tmpdir))
    def fetch(self, requirement, tmpdir, force_scan=False, source=False):
        """Obtain a file suitable for fulfilling `requirement`

        DEPRECATED; use the ``fetch_distribution()`` method now instead.  For
        backward compatibility, this routine is identical but returns the
        ``location`` of the downloaded distribution instead of a distribution
        object.
        """
        dist = self.fetch_distribution(requirement,tmpdir,force_scan,source)
        if dist is not None:
            return dist.location
        return None
    def gen_setup(self, filename, fragment, tmpdir):
        # synthesize a trivial setup.py for a bare .py download whose
        # "#egg=name-version" fragment names it unambiguously
        match = EGG_FRAGMENT.match(fragment)
        dists = match and [
            d for d in
            interpret_distro_name(filename, match.group(1), None) if d.version
        ] or []
        if len(dists)==1:  # unambiguous ``#egg`` fragment
            basename = os.path.basename(filename)
            # Make sure the file has been downloaded to the temp dir.
            if os.path.dirname(filename) != tmpdir:
                dst = os.path.join(tmpdir, basename)
                from setuptools.command.easy_install import samefile
                if not samefile(filename, dst):
                    shutil.copy2(filename, dst)
                    filename=dst
            with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
                file.write(
                    "from setuptools import setup\n"
                    "setup(name=%r, version=%r, py_modules=[%r])\n"
                    % (
                        dists[0].project_name, dists[0].version,
                        os.path.splitext(basename)[0]
                    )
                )
            return filename
        elif match:
            raise DistutilsError(
                "Can't unambiguously interpret project/version identifier %r; "
                "any dashes in the name or version should be escaped using "
                "underscores. %r" % (fragment,dists)
            )
        else:
            raise DistutilsError(
                "Can't process plain .py files without an '#egg=name-version'"
                " suffix to enable automatic setup script generation."
            )
    # chunk size used when streaming downloads to disk
    dl_blocksize = 8192
    def _download_to(self, url, filename):
        # stream `url` to `filename`, feeding a hash checker as we go;
        # returns the response headers
        self.info("Downloading %s", url)
        # Download the file
        fp, info = None, None
        try:
            checker = HashChecker.from_url(url)
            fp = self.open_url(strip_fragment(url))
            if isinstance(fp, HTTPError):
                raise DistutilsError(
                    "Can't download %s: %s %s" % (url, fp.code,fp.msg)
                )
            headers = fp.info()
            blocknum = 0
            bs = self.dl_blocksize
            size = -1
            if "content-length" in headers:
                # Some servers return multiple Content-Length headers :(
                sizes = get_all_headers(headers, 'Content-Length')
                size = max(map(int, sizes))
                self.reporthook(url, filename, blocknum, bs, size)
            with open(filename,'wb') as tfp:
                while True:
                    block = fp.read(bs)
                    if block:
                        checker.feed(block)
                        tfp.write(block)
                        blocknum += 1
                        self.reporthook(url, filename, blocknum, bs, size)
                    else:
                        break
                self.check_hash(checker, filename, tfp)
            return headers
        finally:
            if fp: fp.close()
    def reporthook(self, url, filename, blocknum, blksize, size):
        pass  # no-op
    def open_url(self, url, warning=None):
        # open `url`, translating the many possible network failures into
        # either a warning (if `warning` is given) or a DistutilsError
        if url.startswith('file:'):
            return local_open(url)
        try:
            return open_with_auth(url, self.opener)
        except (ValueError, httplib.InvalidURL) as v:
            msg = ' '.join([str(arg) for arg in v.args])
            if warning:
                self.warn(warning, msg)
            else:
                raise DistutilsError('%s %s' % (url, msg))
        except urllib2.HTTPError as v:
            # HTTP errors are returned, not raised, so callers can inspect
            return v
        except urllib2.URLError as v:
            if warning:
                self.warn(warning, v.reason)
            else:
                raise DistutilsError("Download error for %s: %s"
                                     % (url, v.reason))
        except httplib.BadStatusLine as v:
            if warning:
                self.warn(warning, v.line)
            else:
                raise DistutilsError(
                    '%s returned a bad status line. The server might be '
                    'down, %s' %
                    (url, v.line)
                )
        except httplib.HTTPException as v:
            if warning:
                self.warn(warning, v)
            else:
                raise DistutilsError("Download error for %s: %s"
                                     % (url, v))
    def _download_url(self, scheme, url, tmpdir):
        # Determine download filename
        #
        name, fragment = egg_info_for_url(url)
        if name:
            while '..' in name:
                # defend against path traversal in the remote filename
                name = name.replace('..','.').replace('\\','_')
        else:
            name = "__downloaded__"  # default if URL has no path contents
        if name.endswith('.egg.zip'):
            name = name[:-4]  # strip the extra .zip before download
        filename = os.path.join(tmpdir,name)
        # Download the file
        #
        if scheme=='svn' or scheme.startswith('svn+'):
            return self._download_svn(url, filename)
        elif scheme=='git' or scheme.startswith('git+'):
            return self._download_git(url, filename)
        elif scheme.startswith('hg+'):
            return self._download_hg(url, filename)
        elif scheme=='file':
            return url2pathname(urlparse(url)[2])
        else:
            self.url_ok(url, True)  # raises error if not allowed
            return self._attempt_download(url, filename)
    def scan_url(self, url):
        self.process_url(url, True)
    def _attempt_download(self, url, filename):
        # download, then detour to _download_html if we got a page instead
        headers = self._download_to(url, filename)
        if 'html' in headers.get('content-type','').lower():
            return self._download_html(url, headers, filename)
        else:
            return filename
    def _download_html(self, url, headers, filename):
        # an HTML response is only acceptable if it's a Subversion index
        file = open(filename)
        for line in file:
            if line.strip():
                # Check for a subversion index page
                if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
                    # it's a subversion index page:
                    file.close()
                    os.unlink(filename)
                    return self._download_svn(url, filename)
                break  # not an index page
        file.close()
        os.unlink(filename)
        raise DistutilsError("Unexpected HTML page found at "+url)
    def _download_svn(self, url, filename):
        # NOTE(review): when credentials are split out below, `url` (not
        # `path`) is passed back into urlunparse -- looks like a latent
        # quirk inherited from upstream; confirm before relying on svn:
        # URLs with embedded credentials.
        url = url.split('#',1)[0]  # remove any fragment for svn's sake
        creds = ''
        if url.lower().startswith('svn:') and '@' in url:
            scheme, netloc, path, p, q, f = urlparse(url)
            if not netloc and path.startswith('//') and '/' in path[2:]:
                netloc, path = path[2:].split('/',1)
                auth, host = splituser(netloc)
                if auth:
                    if ':' in auth:
                        user, pw = auth.split(':',1)
                        creds = " --username=%s --password=%s" % (user, pw)
                    else:
                        creds = " --username="+auth
                    netloc = host
                    url = urlunparse((scheme, netloc, url, p, q, f))
        self.info("Doing subversion checkout from %s to %s", url, filename)
        os.system("svn checkout%s -q %s %s" % (creds, url, filename))
        return filename
    @staticmethod
    def _vcs_split_rev_from_url(url, pop_prefix=False):
        # split "scheme+vcs://host/path@rev#frag" into (clean url, rev)
        scheme, netloc, path, query, frag = urlsplit(url)
        scheme = scheme.split('+', 1)[-1]
        # Some fragment identification fails
        path = path.split('#',1)[0]
        rev = None
        if '@' in path:
            path, rev = path.rsplit('@', 1)
        # Also, discard fragment
        url = urlunsplit((scheme, netloc, path, query, ''))
        return url, rev
    def _download_git(self, url, filename):
        filename = filename.split('#',1)[0]
        url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
        self.info("Doing git clone from %s to %s", url, filename)
        os.system("git clone --quiet %s %s" % (url, filename))
        if rev is not None:
            self.info("Checking out %s", rev)
            os.system("(cd %s && git checkout --quiet %s)" % (
                filename,
                rev,
            ))
        return filename
    def _download_hg(self, url, filename):
        filename = filename.split('#',1)[0]
        url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
        self.info("Doing hg clone from %s to %s", url, filename)
        os.system("hg clone --quiet %s %s" % (url, filename))
        if rev is not None:
            self.info("Updating to %s", rev)
            os.system("(cd %s && hg up -C -r %s >&-)" % (
                filename,
                rev,
            ))
        return filename
    # thin wrappers over the distutils log, so subclasses can redirect
    def debug(self, msg, *args):
        log.debug(msg, *args)
    def info(self, msg, *args):
        log.info(msg, *args)
    def warn(self, msg, *args):
        log.warn(msg, *args)
# This pattern matches a character entity reference (a decimal numeric
# references, a hexadecimal numeric reference, or a named reference).
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
def uchr(c):
    """Return the character for codepoint ``c``; pass non-ints through."""
    if not isinstance(c, int):
        return c
    return unichr(c) if c > 255 else chr(c)
def decode_entity(match):
    """Decode one matched entity reference into its character."""
    ref = match.group(1)
    if ref.startswith('#x'):
        codepoint = int(ref[2:], 16)
    elif ref.startswith('#'):
        codepoint = int(ref[1:])
    else:
        # named entity; fall back to the raw text if unknown
        codepoint = name2codepoint.get(ref, match.group(0))
    return uchr(codepoint)
def htmldecode(text):
    """Decode HTML entities in the given text."""
    return entity_sub(decode_entity, text)
def socket_timeout(timeout=15):
    """Decorator factory: run the wrapped callable with the global socket
    default timeout set to `timeout` seconds, restoring the previous
    value afterwards (even if the call raises).
    """
    def decorator(func):
        from functools import wraps
        # wraps() preserves the wrapped function's name/docstring, which the
        # original version lost (both nested functions shared one name).
        @wraps(func)
        def wrapper(*args, **kwargs):
            old_timeout = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)
            try:
                return func(*args, **kwargs)
            finally:
                # always restore, even on error
                socket.setdefaulttimeout(old_timeout)
        return wrapper
    return decorator
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
# use the legacy interface for Python 2.3 support
encoded_bytes = base64.encodestring(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n','')
class Credential(object):
    """
    A username/password pair. Use like a namedtuple.
    """
    def __init__(self, username, password):
        self.username = username
        self.password = password
    def __iter__(self):
        # iterable so callers can unpack: user, pw = cred
        return iter((self.username, self.password))
    def __str__(self):
        return '%(username)s:%(password)s' % vars(self)
class PyPIConfig(ConfigParser.ConfigParser):
    """Reads repository credentials from the user's ~/.pypirc, if any."""
    def __init__(self):
        """
        Load from ~/.pypirc
        """
        empty = dict.fromkeys(['username', 'password', 'repository'], '')
        ConfigParser.ConfigParser.__init__(self, empty)
        rc_path = os.path.join(os.path.expanduser('~'), '.pypirc')
        if os.path.exists(rc_path):
            self.read(rc_path)
    @property
    def creds_by_repository(self):
        # map repository URL -> Credential for every section naming a repo
        with_repos = (
            section for section in self.sections()
            if self.get(section, 'repository').strip()
        )
        return dict(self._get_repo_cred(section) for section in with_repos)
    def _get_repo_cred(self, section):
        repo = self.get(section, 'repository').strip()
        cred = Credential(
            self.get(section, 'username').strip(),
            self.get(section, 'password').strip(),
        )
        return repo, cred
    def find_credential(self, url):
        """
        If the URL indicated appears to be a repository defined in this
        config, return the credential for that repository.
        """
        for repository, cred in self.creds_by_repository.items():
            if url.startswith(repository):
                return cred
def open_with_auth(url, opener=urllib2.urlopen):
    """Open a urllib2 request, handling HTTP authentication"""
    scheme, netloc, path, params, query, frag = urlparse(url)
    # Double scheme does not raise on Mac OS X as revealed by a
    # failing test. We would expect "nonnumeric port". Refs #20.
    if netloc.endswith(':'):
        raise httplib.InvalidURL("nonnumeric port: ''")
    if scheme in ('http', 'https'):
        # split "user:pass@host" into credentials and bare host
        auth, host = splituser(netloc)
    else:
        # NOTE(review): `host` is left unbound on this branch; if .pypirc
        # supplies a credential below, the urlunparse call would raise
        # NameError -- confirm whether non-http(s) schemes can reach it.
        auth = None
    if not auth:
        # no credentials in the URL itself; try the user's .pypirc
        cred = PyPIConfig().find_credential(url)
        if cred:
            auth = str(cred)
            info = cred.username, url
            log.info('Authenticating as %s for %s (from .pypirc)' % info)
    if auth:
        auth = "Basic " + _encode_auth(auth)
        # rebuild the URL without embedded credentials for the request
        new_url = urlunparse((scheme,host,path,params,query,frag))
        request = urllib2.Request(new_url)
        request.add_header("Authorization", auth)
    else:
        request = urllib2.Request(url)
    request.add_header('User-Agent', user_agent)
    fp = opener(request)
    if auth:
        # Put authentication info back into request URL if same host,
        # so that links found on the page will work
        s2, h2, path2, param2, query2, frag2 = urlparse(fp.url)
        if s2==scheme and h2==host:
            fp.url = urlunparse((s2,netloc,path2,param2,query2,frag2))
    return fp
# adding a timeout to avoid freezing package_index
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
    """No-op; retained only for backward compatibility."""
    return url
def local_open(url):
    """Read a local path, with special support for directories"""
    scheme, server, path, param, query, frag = urlparse(url)
    filename = url2pathname(path)
    if os.path.isfile(filename):
        return urllib2.urlopen(url)
    elif path.endswith('/') and os.path.isdir(filename):
        files = []
        # for/else: if an index.html is present we serve it and break;
        # otherwise (loop completes) we synthesize a directory listing
        for f in os.listdir(filename):
            if f=='index.html':
                with open(os.path.join(filename,f),'r') as fp:
                    body = fp.read()
                break
            elif os.path.isdir(os.path.join(filename,f)):
                # mark subdirectories with a trailing slash in the listing
                f+='/'
            files.append("<a href=%r>%s</a>" % (f,f))
        else:
            body = ("<html><head><title>%s</title>" % url) + \
                "</head><body>%s</body></html>" % '\n'.join(files)
        status, message = 200, "OK"
    else:
        status, message, body = 404, "Path not found", "Not found"
    headers = {'content-type': 'text/html'}
    # return an HTTPError even for 200 so callers get a file-like object
    # with .code/.headers, matching what urllib would hand back
    return HTTPError(url, status, message, headers, StringIO(body))
| mit |
Azure/azure-sdk-for-python | sdk/edgegateway/azure-mgmt-edgegateway/azure/mgmt/edgegateway/models/order.py | 1 | 3755 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .arm_base_model import ARMBaseModel
class Order(ARMBaseModel):
    """The order details.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: The path ID that uniquely identifies the object.
    :vartype id: str
    :ivar name: The object name.
    :vartype name: str
    :ivar type: The hierarchical type of the object.
    :vartype type: str
    :param contact_information: Required. The contact details.
    :type contact_information: ~azure.mgmt.edgegateway.models.ContactDetails
    :param shipping_address: Required. The shipping address.
    :type shipping_address: ~azure.mgmt.edgegateway.models.Address
    :param current_status: Current status of the order.
    :type current_status: ~azure.mgmt.edgegateway.models.OrderStatus
    :ivar order_history: List of status changes in the order.
    :vartype order_history: list[~azure.mgmt.edgegateway.models.OrderStatus]
    :ivar serial_number: Serial number of the device.
    :vartype serial_number: str
    :ivar delivery_tracking_info: Tracking information for the package
     delivered to the customer whether it has an original or a replacement
     device.
    :vartype delivery_tracking_info:
     list[~azure.mgmt.edgegateway.models.TrackingInfo]
    :ivar return_tracking_info: Tracking information for the package returned
     from the customer whether it has an original or a replacement device.
    :vartype return_tracking_info:
     list[~azure.mgmt.edgegateway.models.TrackingInfo]
    """
    # readonly fields are server-populated; required ones must be set by
    # the caller before the model is serialized
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'contact_information': {'required': True},
        'shipping_address': {'required': True},
        'order_history': {'readonly': True},
        'serial_number': {'readonly': True},
        'delivery_tracking_info': {'readonly': True},
        'return_tracking_info': {'readonly': True},
    }
    # maps Python attribute -> wire key and msrest type for (de)serialization
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'contact_information': {'key': 'properties.contactInformation', 'type': 'ContactDetails'},
        'shipping_address': {'key': 'properties.shippingAddress', 'type': 'Address'},
        'current_status': {'key': 'properties.currentStatus', 'type': 'OrderStatus'},
        'order_history': {'key': 'properties.orderHistory', 'type': '[OrderStatus]'},
        'serial_number': {'key': 'properties.serialNumber', 'type': 'str'},
        'delivery_tracking_info': {'key': 'properties.deliveryTrackingInfo', 'type': '[TrackingInfo]'},
        'return_tracking_info': {'key': 'properties.returnTrackingInfo', 'type': '[TrackingInfo]'},
    }
    def __init__(self, **kwargs):
        super(Order, self).__init__(**kwargs)
        self.contact_information = kwargs.get('contact_information', None)
        self.shipping_address = kwargs.get('shipping_address', None)
        self.current_status = kwargs.get('current_status', None)
        # server-populated (readonly) attributes start out as None locally
        self.order_history = None
        self.serial_number = None
        self.delivery_tracking_info = None
        self.return_tracking_info = None
| mit |
p2pool/p2pool | p2pool/util/variable.py | 270 | 2541 | import itertools
import weakref
from twisted.internet import defer, reactor
from twisted.python import failure, log
class Event(object):
    """A simple observable: callbacks registered with watch() are invoked
    each time happened() fires.  (Python 2 codebase: uses .next() and
    iteritems().)"""
    def __init__(self):
        # id -> callback; ids come from a monotonically increasing counter
        self.observers = {}
        self.id_generator = itertools.count()
        # lazily-created child Event whose watchers fire only once
        self._once = None
        # number of times this event has happened
        self.times = 0
    def run_and_watch(self, func):
        # invoke immediately, then subscribe for future occurrences
        func()
        return self.watch(func)
    def watch_weakref(self, obj, func):
        # func must not contain a reference to obj!
        # The weakref callback unsubscribes automatically when obj dies.
        watch_id = self.watch(lambda *args: func(obj_ref(), *args))
        obj_ref = weakref.ref(obj, lambda _: self.unwatch(watch_id))
    def watch(self, func):
        """Register func; returns an id usable with unwatch()."""
        id = self.id_generator.next()
        self.observers[id] = func
        return id
    def unwatch(self, id):
        self.observers.pop(id)
    @property
    def once(self):
        # child Event consumed (reset to None) on the next happened()
        res = self._once
        if res is None:
            res = self._once = Event()
        return res
    def happened(self, *event):
        self.times += 1
        # detach the one-shot event *before* notifying, so observers that
        # re-subscribe via .once get a fresh instance
        once, self._once = self._once, None
        # notify in id (registration) order; errors are logged, not raised
        for id, func in sorted(self.observers.iteritems()):
            try:
                func(*event)
            except:
                log.err(None, "Error while processing Event callbacks:")
        if once is not None:
            once.happened(*event)
    def get_deferred(self, timeout=None):
        """Return a Deferred fired with the next occurrence's args, or
        errbacked with TimeoutError after `timeout` seconds."""
        once = self.once
        df = defer.Deferred()
        id1 = once.watch(lambda *event: df.callback(event))
        if timeout is not None:
            def do_timeout():
                df.errback(failure.Failure(defer.TimeoutError('in Event.get_deferred')))
                once.unwatch(id1)
                # `x` is bound below, before the timer can possibly fire
                once.unwatch(x)
            delay = reactor.callLater(timeout, do_timeout)
            x = once.watch(lambda *event: delay.cancel())
        return df
class Variable(object):
    """An observable value (Python 2 / Twisted).

    `changed` fires with the new value; `transitioned` fires with
    (old_value, new_value).  Setting a value equal to the current one is a
    no-op and fires nothing.
    """
    def __init__(self, value):
        self.value = value
        self.changed = Event()
        self.transitioned = Event()
    def set(self, value):
        if value == self.value:
            return
        oldvalue = self.value
        self.value = value
        # changed fires first, then transitioned, both synchronously.
        self.changed.happened(value)
        self.transitioned.happened(oldvalue, value)
    @defer.inlineCallbacks
    def get_when_satisfies(self, func):
        # Re-check on every change until func(value) is truthy; fires
        # immediately if the current value already satisfies the predicate.
        while True:
            if func(self.value):
                defer.returnValue(self.value)
            yield self.changed.once.get_deferred()
    def get_not_none(self):
        # Deferred for the first non-None value.
        return self.get_when_satisfies(lambda val: val is not None)
| gpl-3.0 |
stackforge/compute-hyperv | compute_hyperv/tests/unit/cluster/test_livemigrationops.py | 1 | 9007 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from nova.compute import vm_states
from nova import exception
from nova import test as nova_test
from os_win import constants as os_win_const
from compute_hyperv.nova.cluster import livemigrationops
from compute_hyperv.nova import livemigrationops as base_livemigrationops
from compute_hyperv.tests import fake_instance
from compute_hyperv.tests.unit import test_base
@ddt.ddt
class ClusterLiveMigrationOpsTestCase(test_base.HyperVBaseTestCase):
    """Unit tests for the Hyper-V Cluster LivemigrationOps class."""
    # Classes autospec-mocked by the base test case fixture.
    _autospec_classes = [
        base_livemigrationops.volumeops.VolumeOps,
    ]
    def setUp(self):
        super(ClusterLiveMigrationOpsTestCase, self).setUp()
        self._fake_context = 'fake_context'
        self.livemigrops = livemigrationops.ClusterLiveMigrationOps()
        self._clustutils = self.livemigrops._clustutils
    def test_is_instance_clustered(self):
        # is_instance_clustered() proxies straight to clustutils.vm_exists().
        ret = self.livemigrops.is_instance_clustered(
            mock.sentinel.instance)
        self.assertEqual(
            self.livemigrops._clustutils.vm_exists.return_value, ret)
    def test_live_migration_in_cluster(self):
        # Destination is a cluster node: migration goes through clustutils
        # and the post_method callback fires.
        migr_timeout = 10
        self.flags(instance_live_migration_timeout=migr_timeout,
                   group='hyperv')
        mock_instance = fake_instance.fake_instance_obj(self._fake_context)
        self.livemigrops._clustutils.vm_exists.return_value = True
        post_method = mock.MagicMock()
        dest = 'fake_dest'
        node_names = [dest, 'fake_node2']
        get_nodes = self.livemigrops._clustutils.get_cluster_node_names
        get_nodes.return_value = node_names
        self.livemigrops.live_migration(
            self._fake_context, mock_instance, dest, post_method,
            mock.sentinel.recover_method,
            block_migration=mock.sentinel.block_migration,
            migrate_data=mock.sentinel.migrate_data)
        clustutils = self.livemigrops._clustutils
        clustutils.live_migrate_vm.assert_called_once_with(
            mock_instance.name, dest, migr_timeout)
        post_method.assert_called_once_with(
            self._fake_context, mock_instance, dest,
            mock.sentinel.block_migration, mock.sentinel.migrate_data)
    @mock.patch.object(livemigrationops.ClusterLiveMigrationOps,
                       '_check_failed_instance_migration')
    def test_live_migration_in_cluster_exception(self, mock_check_migr):
        # A clustutils failure must trigger the state check and the
        # recover_method callback.
        mock_instance = fake_instance.fake_instance_obj(self._fake_context)
        self.livemigrops._clustutils.vm_exists.return_value = True
        recover_method = mock.MagicMock()
        dest = 'fake_dest'
        node_names = [dest, 'fake_node2']
        get_nodes = self.livemigrops._clustutils.get_cluster_node_names
        get_nodes.return_value = node_names
        clustutils = self.livemigrops._clustutils
        clustutils.live_migrate_vm.side_effect = nova_test.TestingException
        self.livemigrops.live_migration(
            self._fake_context, mock_instance, dest, mock.sentinel.post_method,
            recover_method,
            block_migration=mock.sentinel.block_migration,
            migrate_data=mock.sentinel.migrate_data)
        mock_check_migr.assert_called_once_with(
            mock_instance,
            expected_state=os_win_const.CLUSTER_GROUP_ONLINE)
        recover_method.assert_called_once_with(
            self._fake_context, mock_instance, dest,
            mock.sentinel.migrate_data)
    @mock.patch.object(base_livemigrationops.LiveMigrationOps,
                       'live_migration')
    def test_live_migration_outside_cluster(self, mock_super_live_migration):
        # Destination not in the cluster: falls back to the base
        # (non-clustered) live_migration implementation.
        mock_instance = fake_instance.fake_instance_obj(self._fake_context)
        self.livemigrops._clustutils.vm_exists.return_value = True
        dest = 'fake_dest'
        node_names = ['fake_node1', 'fake_node2']
        get_nodes = self.livemigrops._clustutils.get_cluster_node_names
        get_nodes.return_value = node_names
        self.livemigrops.live_migration(
            self._fake_context, mock_instance, dest, mock.sentinel.post_method,
            mock.sentinel.recover_method, block_migration=False,
            migrate_data=None)
        mock_super_live_migration.assert_called_once_with(
            self._fake_context, mock_instance, dest, mock.sentinel.post_method,
            mock.sentinel.recover_method, False, None)
    # Scenarios: healthy online group vs. pending state, queued migration,
    # or group owned by another node (each of which is an invalid state).
    @ddt.data({},
              {'state': os_win_const.CLUSTER_GROUP_PENDING,
               'expected_invalid_state': True},
              {'migration_queued': True,
               'expected_invalid_state': True},
              {'owner_node': 'some_other_node',
               'expected_invalid_state': True})
    @ddt.unpack
    def test_check_failed_instance_migration(
            self, state=os_win_const.CLUSTER_GROUP_ONLINE,
            owner_node='source_node', migration_queued=False,
            expected_invalid_state=False):
        # Owner node name is upper-cased to verify case-insensitive compare.
        state_info = dict(owner_node=owner_node.upper(),
                          state=state,
                          migration_queued=migration_queued)
        self._clustutils.get_cluster_group_state_info.return_value = (
            state_info)
        self._clustutils.get_node_name.return_value = 'source_node'
        mock_instance = mock.Mock()
        if expected_invalid_state:
            self.assertRaises(
                exception.InstanceInvalidState,
                self.livemigrops._check_failed_instance_migration,
                mock_instance,
                os_win_const.CLUSTER_GROUP_ONLINE)
            self.assertEqual(vm_states.ERROR, mock_instance.vm_state)
        else:
            self.livemigrops._check_failed_instance_migration(
                mock_instance, os_win_const.CLUSTER_GROUP_ONLINE)
        self._clustutils.get_cluster_group_state_info.assert_called_once_with(
            mock_instance.name)
        self._clustutils.get_node_name.assert_called_once_with()
    def test_pre_live_migration_clustered(self):
        # Clustered instance: only the volume connections are prepared.
        self.livemigrops.pre_live_migration(self._fake_context,
                                            mock.sentinel.fake_instance,
                                            mock.sentinel.bdi,
                                            mock.sentinel.network_info)
        fake_conn_vol = self.livemigrops._volumeops.connect_volumes
        fake_conn_vol.assert_called_once_with(mock.sentinel.bdi)
    @mock.patch.object(base_livemigrationops.LiveMigrationOps,
                       'pre_live_migration')
    def test_pre_live_migration_not_clustered(self, mock_pre_live_migration):
        # Non-clustered instance: delegates to the base implementation.
        self.livemigrops._clustutils.vm_exists.return_value = False
        self.livemigrops.pre_live_migration(self._fake_context,
                                            mock.sentinel.fake_instance,
                                            mock.sentinel.bdi,
                                            mock.sentinel.network_info)
        mock_pre_live_migration.assert_called_once_with(
            self._fake_context, mock.sentinel.fake_instance,
            mock.sentinel.bdi, mock.sentinel.network_info)
    @mock.patch.object(base_livemigrationops.LiveMigrationOps,
                       'post_live_migration')
    def test_post_live_migration_clustered(self, mock_post_live_migration):
        # Clustered instance: base post_live_migration must be skipped.
        self.livemigrops.post_live_migration(self._fake_context,
                                             mock.sentinel.fake_instance,
                                             mock.sentinel.bdi,
                                             mock.sentinel.migrate_data)
        self.assertFalse(mock_post_live_migration.called)
    @mock.patch.object(base_livemigrationops.LiveMigrationOps,
                       'post_live_migration')
    def test_post_live_migration_not_clustered(self, mock_post_live_migration):
        # Non-clustered instance: delegates to the base implementation.
        self.livemigrops._clustutils.vm_exists.return_value = False
        self.livemigrops.post_live_migration(self._fake_context,
                                             mock.sentinel.fake_instance,
                                             mock.sentinel.bdi,
                                             mock.sentinel.migrate_data)
        mock_post_live_migration.assert_called_once_with(
            self._fake_context, mock.sentinel.fake_instance,
            mock.sentinel.bdi,
            mock.sentinel.migrate_data)
| apache-2.0 |
andfoy/margffoy-tuay-server | env/lib/python2.7/site-packages/tornado/concurrent.py | 35 | 18000 | #!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with threads and ``Futures``.
``Futures`` are a pattern for concurrent programming introduced in
Python 3.2 in the `concurrent.futures` package (this package has also
been backported to older versions of Python and can be installed with
``pip install futures``). Tornado will use `concurrent.futures.Future` if
it is available; otherwise it will use a compatible class defined in this
module.
"""
from __future__ import absolute_import, division, print_function, with_statement
import functools
import platform
import traceback
import sys
from tornado.log import app_log
from tornado.stack_context import ExceptionStackContext, wrap
from tornado.util import raise_exc_info, ArgReplacer
try:
from concurrent import futures
except ImportError:
futures = None
# Can the garbage collector handle cycles that include __del__ methods?
# This is true in cpython beginning with version 3.4 (PEP 442).
_GC_CYCLE_FINALIZERS = (platform.python_implementation() == 'CPython' and
sys.version_info >= (3, 4))
class ReturnValueIgnoredError(Exception):
pass
# This class and associated code in the future object is derived
# from the Trollius project, a backport of asyncio to Python 2.x - 3.x
class _TracebackLogger(object):
"""Helper to log a traceback upon destruction if not cleared.
This solves a nasty problem with Futures and Tasks that have an
exception set: if nobody asks for the exception, the exception is
never logged. This violates the Zen of Python: 'Errors should
never pass silently. Unless explicitly silenced.'
However, we don't want to log the exception as soon as
set_exception() is called: if the calling code is written
properly, it will get the exception and handle it properly. But
we *do* want to log it if result() or exception() was never called
-- otherwise developers waste a lot of time wondering why their
buggy code fails silently.
An earlier attempt added a __del__() method to the Future class
itself, but this backfired because the presence of __del__()
prevents garbage collection from breaking cycles. A way out of
this catch-22 is to avoid having a __del__() method on the Future
class itself, but instead to have a reference to a helper object
with a __del__() method that logs the traceback, where we ensure
that the helper object doesn't participate in cycles, and only the
Future has a reference to it.
The helper object is added when set_exception() is called. When
the Future is collected, and the helper is present, the helper
object is also collected, and its __del__() method will log the
traceback. When the Future's result() or exception() method is
called (and a helper object is present), it removes the the helper
object, after calling its clear() method to prevent it from
logging.
One downside is that we do a fair amount of work to extract the
traceback from the exception, even when it is never logged. It
would seem cheaper to just store the exception object, but that
references the traceback, which references stack frames, which may
reference the Future, which references the _TracebackLogger, and
then the _TracebackLogger would be included in a cycle, which is
what we're trying to avoid! As an optimization, we don't
immediately format the exception; we only do the work when
activate() is called, which call is delayed until after all the
Future's callbacks have run. Since usually a Future has at least
one callback (typically set by 'yield From') and usually that
callback extracts the callback, thereby removing the need to
format the exception.
PS. I don't claim credit for this solution. I first heard of it
in a discussion about closing files when they are collected.
"""
__slots__ = ('exc_info', 'formatted_tb')
def __init__(self, exc_info):
self.exc_info = exc_info
self.formatted_tb = None
def activate(self):
exc_info = self.exc_info
if exc_info is not None:
self.exc_info = None
self.formatted_tb = traceback.format_exception(*exc_info)
def clear(self):
self.exc_info = None
self.formatted_tb = None
def __del__(self):
if self.formatted_tb:
app_log.error('Future exception was never retrieved: %s',
''.join(self.formatted_tb).rstrip())
class Future(object):
    """Placeholder for an asynchronous result.
    A ``Future`` encapsulates the result of an asynchronous
    operation. In synchronous applications ``Futures`` are used
    to wait for the result from a thread or process pool; in
    Tornado they are normally used with `.IOLoop.add_future` or by
    yielding them in a `.gen.coroutine`.
    `tornado.concurrent.Future` is similar to
    `concurrent.futures.Future`, but not thread-safe (and therefore
    faster for use with single-threaded event loops).
    In addition to ``exception`` and ``set_exception``, methods ``exc_info``
    and ``set_exc_info`` are supported to capture tracebacks in Python 2.
    The traceback is automatically available in Python 3, but in the
    Python 2 futures backport this information is discarded.
    This functionality was previously available in a separate class
    ``TracebackFuture``, which is now a deprecated alias for this class.
    .. versionchanged:: 4.0
       `tornado.concurrent.Future` is always a thread-unsafe ``Future``
       with support for the ``exc_info`` methods. Previously it would
       be an alias for the thread-safe `concurrent.futures.Future`
       if that package was available and fall back to the thread-unsafe
       implementation if it was not.
    .. versionchanged:: 4.1
       If a `.Future` contains an error but that error is never observed
       (by calling ``result()``, ``exception()``, or ``exc_info()``),
       a stack trace will be logged when the `.Future` is garbage collected.
       This normally indicates an error in the application, but in cases
       where it results in undesired logging it may be necessary to
       suppress the logging by ensuring that the exception is observed:
       ``f.add_done_callback(lambda f: f.exception())``.
    """
    def __init__(self):
        self._done = False
        self._result = None
        self._exc_info = None
        self._log_traceback = False  # Used for Python >= 3.4
        self._tb_logger = None  # Used for Python <= 3.3
        self._callbacks = []
    def cancel(self):
        """Cancel the operation, if possible.
        Tornado ``Futures`` do not support cancellation, so this method always
        returns False.
        """
        return False
    def cancelled(self):
        """Returns True if the operation has been cancelled.
        Tornado ``Futures`` do not support cancellation, so this method
        always returns False.
        """
        return False
    def running(self):
        """Returns True if this operation is currently running."""
        return not self._done
    def done(self):
        """Returns True if the future has finished running."""
        return self._done
    def _clear_tb_log(self):
        # The exception has been observed: stop both logging mechanisms
        # (the >=3.4 __del__ path and the <=3.3 helper-object path).
        self._log_traceback = False
        if self._tb_logger is not None:
            self._tb_logger.clear()
            self._tb_logger = None
    def result(self, timeout=None):
        """If the operation succeeded, return its result.  If it failed,
        re-raise its exception.
        This method takes a ``timeout`` argument for compatibility with
        `concurrent.futures.Future` but it is an error to call it
        before the `Future` is done, so the ``timeout`` is never used.
        """
        self._clear_tb_log()
        # Fast path for a non-None result; a legitimate None result falls
        # through to _check_done() and is returned below.
        if self._result is not None:
            return self._result
        if self._exc_info is not None:
            raise_exc_info(self._exc_info)
        self._check_done()
        return self._result
    def exception(self, timeout=None):
        """If the operation raised an exception, return the `Exception`
        object.  Otherwise returns None.
        This method takes a ``timeout`` argument for compatibility with
        `concurrent.futures.Future` but it is an error to call it
        before the `Future` is done, so the ``timeout`` is never used.
        """
        self._clear_tb_log()
        if self._exc_info is not None:
            return self._exc_info[1]
        else:
            self._check_done()
            return None
    def add_done_callback(self, fn):
        """Attaches the given callback to the `Future`.
        It will be invoked with the `Future` as its argument when the Future
        has finished running and its result is available.  In Tornado
        consider using `.IOLoop.add_future` instead of calling
        `add_done_callback` directly.
        """
        # If already done, run the callback synchronously right now.
        if self._done:
            fn(self)
        else:
            self._callbacks.append(fn)
    def set_result(self, result):
        """Sets the result of a ``Future``.
        It is undefined to call any of the ``set`` methods more than once
        on the same object.
        """
        self._result = result
        self._set_done()
    def set_exception(self, exception):
        """Sets the exception of a ``Future.``"""
        # __traceback__ only exists on Python 3; None on Python 2.
        self.set_exc_info(
            (exception.__class__,
             exception,
             getattr(exception, '__traceback__', None)))
    def exc_info(self):
        """Returns a tuple in the same format as `sys.exc_info` or None.
        .. versionadded:: 4.0
        """
        self._clear_tb_log()
        return self._exc_info
    def set_exc_info(self, exc_info):
        """Sets the exception information of a ``Future.``
        Preserves tracebacks on Python 2.
        .. versionadded:: 4.0
        """
        self._exc_info = exc_info
        self._log_traceback = True
        if not _GC_CYCLE_FINALIZERS:
            self._tb_logger = _TracebackLogger(exc_info)
        try:
            self._set_done()
        finally:
            # Activate the logger after all callbacks have had a
            # chance to call result() or exception().
            if self._log_traceback and self._tb_logger is not None:
                self._tb_logger.activate()
        # NOTE(review): this reassignment is redundant -- _exc_info was set
        # above and nothing in between clears it (present in upstream too).
        self._exc_info = exc_info
    def _check_done(self):
        if not self._done:
            raise Exception("DummyFuture does not support blocking for results")
    def _set_done(self):
        self._done = True
        # Callbacks run synchronously; exceptions are logged, not raised,
        # so one failing callback cannot suppress the others.
        for cb in self._callbacks:
            try:
                cb(self)
            except Exception:
                app_log.exception('Exception in callback %r for %r',
                                  cb, self)
        self._callbacks = None
    # On Python 3.3 or older, objects with a destructor part of a reference
    # cycle are never destroyed. It's no longer the case on Python 3.4 thanks to
    # the PEP 442.
    if _GC_CYCLE_FINALIZERS:
        def __del__(self):
            if not self._log_traceback:
                # set_exception() was not called, or result() or exception()
                # has consumed the exception
                return
            tb = traceback.format_exception(*self._exc_info)
            app_log.error('Future %r exception was never retrieved: %s',
                          self, ''.join(tb).rstrip())
# Deprecated backwards-compatible alias (see the Future class docstring).
TracebackFuture = Future

# Types accepted by is_future(): the stdlib/backport Future is included
# only when the ``concurrent.futures`` package could be imported above.
if futures is None:
    FUTURES = Future
else:
    FUTURES = (futures.Future, Future)
def is_future(x):
    """Return True if ``x`` is a Tornado or `concurrent.futures` Future."""
    return isinstance(x, FUTURES)
class DummyExecutor(object):
    """Executor that runs each submitted callable synchronously, inline."""
    def submit(self, fn, *args, **kwargs):
        """Invoke ``fn`` immediately and return an already-completed Future."""
        future = TracebackFuture()
        try:
            result = fn(*args, **kwargs)
        except Exception:
            future.set_exc_info(sys.exc_info())
        else:
            future.set_result(result)
        return future

    def shutdown(self, wait=True):
        """No-op: there are no worker threads to wind down."""
        pass


dummy_executor = DummyExecutor()
def run_on_executor(*args, **kwargs):
    """Decorator to run a synchronous method asynchronously on an executor.

    The decorated method may be called with a ``callback`` keyword
    argument and returns a future.

    The `.IOLoop` and executor to be used are determined by the ``io_loop``
    and ``executor`` attributes of ``self``. To use different attributes,
    pass keyword arguments to the decorator::

        @run_on_executor(executor='_thread_pool')
        def foo(self):
            pass

    .. versionchanged:: 4.2
       Added keyword arguments to use alternative attributes.
    """
    def run_on_executor_decorator(fn):
        # Names of the attributes on ``self`` that hold the executor and
        # the IOLoop, overridable via decorator keyword arguments.
        executor = kwargs.get("executor", "executor")
        io_loop = kwargs.get("io_loop", "io_loop")

        @functools.wraps(fn)
        def wrapper(self, *args, **kwargs):
            callback = kwargs.pop("callback", None)
            future = getattr(self, executor).submit(fn, self, *args, **kwargs)
            if callback:
                # Deliver the result on the IOLoop once the future resolves.
                getattr(self, io_loop).add_future(
                    future, lambda future: callback(future.result()))
            return future
        return wrapper
    if args and kwargs:
        raise ValueError("cannot combine positional and keyword args")
    if len(args) == 1:
        # Used as a bare decorator: @run_on_executor
        return run_on_executor_decorator(args[0])
    elif len(args) != 0:
        # Bug fix: the format string was previously passed to ValueError as
        # a second positional argument instead of being %-interpolated,
        # producing the message "('expected 1 argument, got %d', N)".
        raise ValueError("expected 1 argument, got %d" % len(args))
    return run_on_executor_decorator
# Sentinel distinguishing "callback invoked with no value" from an explicit
# None result in return_future below.
_NO_RESULT = object()
def return_future(f):
    """Decorator to make a function that returns via callback return a
    `Future`.
    The wrapped function should take a ``callback`` keyword argument
    and invoke it with one argument when it has finished.  To signal failure,
    the function can simply raise an exception (which will be
    captured by the `.StackContext` and passed along to the ``Future``).
    From the caller's perspective, the callback argument is optional.
    If one is given, it will be invoked when the function is complete
    with `Future.result()` as an argument.  If the function fails, the
    callback will not be run and an exception will be raised into the
    surrounding `.StackContext`.
    If no callback is given, the caller should use the ``Future`` to
    wait for the function to complete (perhaps by yielding it in a
    `.gen.engine` function, or passing it to `.IOLoop.add_future`).
    Usage:
    .. testcode::
        @return_future
        def future_func(arg1, arg2, callback):
            # Do stuff (possibly asynchronous)
            callback(result)
        @gen.engine
        def caller(callback):
            yield future_func(arg1, arg2)
            callback()
    ..
    Note that ``@return_future`` and ``@gen.engine`` can be applied to the
    same function, provided ``@return_future`` appears first.  However,
    consider using ``@gen.coroutine`` instead of this combination.
    """
    # ArgReplacer finds/injects the ``callback`` argument in either the
    # positional or keyword position the caller used.
    replacer = ArgReplacer(f, 'callback')
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        future = TracebackFuture()
        # Replace any caller-supplied callback with one that resolves the
        # future; the original callback (if any) is re-attached below.
        callback, args, kwargs = replacer.replace(
            lambda value=_NO_RESULT: future.set_result(value),
            args, kwargs)
        def handle_error(typ, value, tb):
            future.set_exc_info((typ, value, tb))
            return True
        exc_info = None
        with ExceptionStackContext(handle_error):
            try:
                result = f(*args, **kwargs)
                if result is not None:
                    raise ReturnValueIgnoredError(
                        "@return_future should not be used with functions "
                        "that return values")
            except:
                exc_info = sys.exc_info()
                raise
        if exc_info is not None:
            # If the initial synchronous part of f() raised an exception,
            # go ahead and raise it to the caller directly without waiting
            # for them to inspect the Future.
            future.result()
        # If the caller passed in a callback, schedule it to be called
        # when the future resolves. It is important that this happens
        # just before we return the future, or else we risk confusing
        # stack contexts with multiple exceptions (one here with the
        # immediate exception, and again when the future resolves and
        # the callback triggers its exception by calling future.result()).
        if callback is not None:
            def run_callback(future):
                result = future.result()
                if result is _NO_RESULT:
                    # Callback was invoked with no arguments: preserve that.
                    callback()
                else:
                    callback(future.result())
            future.add_done_callback(wrap(run_callback))
        return future
    return wrapper
def chain_future(a, b):
    """Chain two futures together so that when one completes, so does the other.

    The result (success or failure) of ``a`` is copied to ``b``, unless
    ``b`` has already been completed or cancelled by the time ``a``
    finishes.
    """
    def _propagate(completed):
        assert completed is a
        if b.done():
            return
        both_traceback = (isinstance(a, TracebackFuture) and
                          isinstance(b, TracebackFuture))
        if both_traceback and a.exc_info() is not None:
            # Preserve the full traceback when both sides support it.
            b.set_exc_info(a.exc_info())
        elif a.exception() is not None:
            b.set_exception(a.exception())
        else:
            b.set_result(a.result())
    a.add_done_callback(_propagate)
| gpl-2.0 |
indigo-dc/im | IM/openid/OpenIDClient.py | 1 | 2965 | # IM - Infrastructure Manager
# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Class to contact with an OpenID server
'''
import requests
import json
import time
from .JWT import JWT
class OpenIDClient(object):
    """Static helpers to talk to an OpenID Connect provider."""
    # NOTE(review): TLS certificate verification is disabled by default for
    # every request below -- acceptable only for trusted/test environments;
    # confirm before relying on this in production.
    VERIFY_SSL = False
    @staticmethod
    def get_user_info_request(token):
        """
        Get the user info from a token.

        Returns a (success, data) tuple: on success, data is the parsed
        userinfo JSON; on failure, it is an error string.
        """
        try:
            decoded_token = JWT().get_info(token)
            headers = {'Authorization': 'Bearer %s' % token}
            # The userinfo endpoint is derived from the token's issuer claim.
            url = "%s%s" % (decoded_token['iss'], "/userinfo")
            resp = requests.request("GET", url, verify=OpenIDClient.VERIFY_SSL, headers=headers)
            if resp.status_code != 200:
                return False, "Code: %d. Message: %s." % (resp.status_code, resp.text)
            return True, json.loads(resp.text)
        except Exception as ex:
            return False, str(ex)
    @staticmethod
    def get_token_introspection(token, client_id, client_secret):
        """
        Get token introspection.

        Returns a (success, data) tuple: on success, data is the parsed
        introspection JSON; on failure, it is an error string.
        """
        try:
            decoded_token = JWT().get_info(token)
            url = "%s%s" % (decoded_token['iss'], "/introspect?token=%s&token_type_hint=access_token" % token)
            # Client credentials are sent via HTTP Basic auth.
            resp = requests.request("GET", url, verify=OpenIDClient.VERIFY_SSL,
                                    auth=requests.auth.HTTPBasicAuth(client_id, client_secret))
            if resp.status_code != 200:
                return False, "Code: %d. Message: %s." % (resp.status_code, resp.text)
            return True, json.loads(resp.text)
        except Exception as ex:
            return False, str(ex)
    @staticmethod
    def is_access_token_expired(token):
        """
        Check if the current access token is expired.

        Returns an (expired, message) tuple; expired is True when the token
        is missing, cannot be decoded, or is past its 'exp' claim.
        """
        if token:
            try:
                decoded_token = JWT().get_info(token)
                now = int(time.time())
                expires = int(decoded_token['exp'])
                validity = expires - now
                if validity < 0:
                    return True, "Token expired"
                else:
                    return False, "Valid Token for %d seconds" % validity
            except Exception:
                return True, "Error getting token info"
        else:
            return True, "No token specified"
| gpl-3.0 |
bw2/gemini | gemini/tool_fusions.py | 5 | 7079 | #!/usr/bin/env python
import os
import GeminiQuery
from gemini_constants import *
import gemini_subjects as subjects
def report_fusion(event, subjects_dict, args):
    """
    Report the fusion event.

    ``event`` holds either one row (DEL/DUP/INV-style SV) or two rows
    (paired BND breakends).  Rows that fail any fusion filter produce no
    output; otherwise a tab-separated line is printed (Python 2 print
    statement).  NOTE(review): ``subjects_dict`` is currently unused here.
    """
    # filter single line events
    if len(event) == 1:
        sv = event.pop()
        gene1 = sv['gene']
        gene1_strand = sv['strand']
        # NOTE(review): gene1_start/gene1_end are assigned but never used.
        gene1_start = sv['transcript_min_start']
        gene1_end = sv['transcript_max_end']
        # query the table to test whether the END breakpoint lies in a gene
        gq = GeminiQuery.GeminiQuery(args.db)
        query = """SELECT gene,
                   strand,
                   in_cosmic_census
                   FROM gene_summary
                   WHERE gene_summary.chrom = '%s'
                   AND (gene_summary.transcript_min_start > %s
                   OR gene_summary.transcript_max_end < %s)
                   AND gene_summary.transcript_min_start < %s
                   AND gene_summary.transcript_max_end > %s
                   AND gene_summary.gene != 'None'
                   LIMIT 1
                   """ % (sv['chrom'],
                          sv['transcript_max_end'],
                          sv['transcript_min_start'],
                          sv['end'],
                          sv['end'])
        gq.run(query)
        gene2, gene2_strand, gene2_cosmic = (None, None, None)
        for row in gq:
            gene2 = row['gene']
            gene2_strand = row['strand']
            gene2_cosmic = row['in_cosmic_census']
            break  # just get the first gene interrupted by the breakend
        # Break if breakpoint2 is intergenic
        if gene2 == None:
            return
        # if SV is a deletion or duplication, genes must be same strand for fusion
        if sv['sub_type'] == 'DEL' or sv['sub_type'] == 'DUP':
            if gene1_strand != gene2_strand:
                return
        # if SV is an inversion, genes must be opposite strands for fusion
        if sv['sub_type'] == 'INV':
            if gene1_strand == gene2_strand:
                return
        # check COSMIC status, if required
        if args.in_cosmic_census and not (sv['in_cosmic_census'] or gene2_cosmic):
            return
        # pass the variables for compatibility with multi-line variants
        end1 = sv
        end2_chrom = end1['chrom']
        end2_start = sv['sv_cipos_start_right']
        end2_end = sv['sv_cipos_end_right']
    # filter multi-line events
    elif len(event) == 2:
        # NOTE(review): list.pop() removes from the end, so end1 is the
        # later-appended row and end2 the earlier one -- confirm intended.
        end1 = event.pop()
        end2 = event.pop()
        gene1_strand, gene2_strand = end1['strand'], end2['strand']  # this is gene_summary.strand
        # require that the genes are non-overlapping
        if (end1['chrom'] == end2['chrom'] \
                and end1['transcript_max_end'] >= end2['transcript_min_start'] \
                and end1['transcript_min_start'] <= end2['transcript_max_end']):
            return
        # if breakpoint joins same strand,
        # then genes must be same strand for fusion
        if (end1['sv_strand'][0] == end1['sv_strand'][1] \
                and gene1_strand != gene2_strand):
            return
        # if breakpoint joins opposite strands,
        # then genes must also be opposite strands for fusion
        if (end1['sv_strand'][0] != end1['sv_strand'][1] \
                and gene1_strand == gene2_strand):
            return
        # check COSMIC status, if required
        if args.in_cosmic_census and not (end1['in_cosmic_census'] or end2['in_cosmic_census']):
            return
        # store the second end for compatibility with single-line SVs
        gene2 = end2['gene']
        end2_chrom = end2['chrom']
        end2_start = end2['sv_cipos_start_right']
        end2_end = end2['sv_cipos_end_right']
    # fusion passes all filters, print
    # (coordinates are emitted BED-style: 0-based starts, 1-based ends)
    print '\t'.join(map(str,
                        [end1['chrom'],
                         end1['sv_cipos_start_left'] - 1,
                         end1['sv_cipos_end_left'],
                         end2_chrom,
                         end2_start - 1,
                         end2_end,
                         end1['sv_event_id'],
                         end1['qual'],
                         end1['sv_strand'][0],
                         end1['sv_strand'][1],
                         end1['sub_type'],
                         end1['gene'],
                         gene2,
                         end1['sv_tool'],
                         end1['sv_evidence_type'],
                         end1['sv_is_precise'],
                         ','.join(end1['variant_samples'])
                         ])
                    )
    return
def get_fusions(args):
    """
    Identify candidate rearrangements resulting in fusion genes.

    Queries somatic structural variants joined with gene_summary and feeds
    each candidate (single-row SVs, or paired BND rows grouped by
    sv_event_id) to report_fusion().
    """
    gq = GeminiQuery.GeminiQuery(args.db, include_gt_cols=True)
    # NOTE(review): idx_to_sample is assigned but never used below.
    idx_to_sample = gq.idx_to_sample
    subjects_dict = subjects.get_subjects(args)
    # create strings for gemini query of command line args
    # NOTE(review): cosmic_string is never populated or interpolated; the
    # COSMIC filter is applied later in report_fusion() instead.
    qual_string, ev_type_string, cosmic_string = ("", "", "")
    if args.min_qual:
        qual_string = " AND qual >= %s" % args.min_qual
    if args.evidence_type:
        ev_type_string = " AND sv_evidence_type = '%s'" % args.evidence_type
    query = """SELECT variants.chrom, start, end,
               ref, alt,
               qual,
               is_somatic, somatic_score,
               type, sub_type, variants.gene,
               sv_strand, sv_length,
               sv_cipos_start_left,
               sv_cipos_start_right,
               sv_cipos_end_left,
               sv_cipos_end_right,
               sv_event_id, sv_mate_id,
               sv_tool, sv_evidence_type,
               sv_is_precise,
               gene_summary.strand,
               gene_summary.transcript_min_start,
               gene_summary.transcript_max_end,
               gene_summary.in_cosmic_census
               FROM variants, gene_summary
               WHERE is_somatic = 1
               AND type = 'sv'
               AND variants.gene is not NULL
               AND variants.chrom = gene_summary.chrom
               AND variants.gene = gene_summary.gene
               %s
               %s
               ORDER BY sv_event_id
               """ % (qual_string, ev_type_string)
    curr = None
    prev = None
    gq.run(query)
    for row in gq:
        # single-line variants (DEL, DUP, INV)
        if row['sub_type'] != 'complex':
            report_fusion([row], subjects_dict, args)
        # multi-line variants (BND)
        elif row['sv_mate_id']:
            curr = row
            # the SV event ids match, and prev is not None
            if (prev and curr['sv_event_id'] == prev['sv_event_id']):
                report_fusion([prev, curr], subjects_dict, args)
            # shift the previous
            prev = curr
def run(parser, args):
    """Tool entry point: run fusion detection against the gemini database.

    NOTE(review): a missing database path is silently ignored; consider
    emitting an error message for the user instead.
    """
    if os.path.exists(args.db):
        get_fusions(args)
| mit |
F35X70/Z7Mini_NX507J_H128_kernel | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct

# Read packed little-endian 32-bit words from stdin and print them as a
# single line of space-separated "hexindex=value" pairs.
index = 0
while True:
    word = sys.stdin.read(4)
    if not word:
        # clean EOF: stop reading
        break
    if len(word) != 4:
        # a partial trailing word means the input file is malformed
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(word)))
        sys.exit(1)
    if index:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(index, struct.unpack("<I", word)[0]))
    index += 1
sys.stdout.write("\n")
| gpl-2.0 |
mancoast/CPythonPyc_test | cpython/270_test_syntax.py | 3 | 18584 | """This module tests SyntaxErrors.
Here's an example of the sort of thing that is tested.
>>> def f(x):
... global x
Traceback (most recent call last):
SyntaxError: name 'x' is local and global (<doctest test.test_syntax[0]>, line 1)
The tests are all raise SyntaxErrors. They were created by checking
each C call that raises SyntaxError. There are several modules that
raise these exceptions-- ast.c, compile.c, future.c, pythonrun.c, and
symtable.c.
The parser itself outlaws a lot of invalid syntax. None of these
errors are tested here at the moment. We should add some tests; since
there are infinitely many programs with invalid syntax, we would need
to be judicious in selecting some.
The compiler generates a synthetic module name for code executed by
doctest. Since all the code comes from the same module, a suffix like
[1] is appended to the module name, As a consequence, changing the
order of tests in this module means renumbering all the errors after
it. (Maybe we should enable the ellipsis option for these tests.)
In ast.c, syntax errors are raised by calling ast_error().
Errors from set_context():
>>> obj.None = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[1]>", line 1
SyntaxError: cannot assign to None
>>> None = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[2]>", line 1
SyntaxError: cannot assign to None
It's a syntax error to assign to the empty tuple. Why isn't it an
error to assign to the empty list? It will always raise some error at
runtime.
>>> () = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[3]>", line 1
SyntaxError: can't assign to ()
>>> f() = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[4]>", line 1
SyntaxError: can't assign to function call
>>> del f()
Traceback (most recent call last):
File "<doctest test.test_syntax[5]>", line 1
SyntaxError: can't delete function call
>>> a + 1 = 2
Traceback (most recent call last):
File "<doctest test.test_syntax[6]>", line 1
SyntaxError: can't assign to operator
>>> (x for x in x) = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[7]>", line 1
SyntaxError: can't assign to generator expression
>>> 1 = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[8]>", line 1
SyntaxError: can't assign to literal
>>> "abc" = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[8]>", line 1
SyntaxError: can't assign to literal
>>> `1` = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[10]>", line 1
SyntaxError: can't assign to repr
If the left-hand side of an assignment is a list or tuple, an illegal
expression inside that container should still cause a syntax error.
This test just checks a couple of cases rather than enumerating all of
them.
>>> (a, "b", c) = (1, 2, 3)
Traceback (most recent call last):
File "<doctest test.test_syntax[11]>", line 1
SyntaxError: can't assign to literal
>>> [a, b, c + 1] = [1, 2, 3]
Traceback (most recent call last):
File "<doctest test.test_syntax[12]>", line 1
SyntaxError: can't assign to operator
>>> a if 1 else b = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[13]>", line 1
SyntaxError: can't assign to conditional expression
From compiler_complex_args():
>>> def f(None=1):
... pass
Traceback (most recent call last):
File "<doctest test.test_syntax[14]>", line 1
SyntaxError: cannot assign to None
From ast_for_arguments():
>>> def f(x, y=1, z):
... pass
Traceback (most recent call last):
File "<doctest test.test_syntax[15]>", line 1
SyntaxError: non-default argument follows default argument
>>> def f(x, None):
... pass
Traceback (most recent call last):
File "<doctest test.test_syntax[16]>", line 1
SyntaxError: cannot assign to None
>>> def f(*None):
... pass
Traceback (most recent call last):
File "<doctest test.test_syntax[17]>", line 1
SyntaxError: cannot assign to None
>>> def f(**None):
... pass
Traceback (most recent call last):
File "<doctest test.test_syntax[18]>", line 1
SyntaxError: cannot assign to None
From ast_for_funcdef():
>>> def None(x):
... pass
Traceback (most recent call last):
File "<doctest test.test_syntax[19]>", line 1
SyntaxError: cannot assign to None
From ast_for_call():
>>> def f(it, *varargs):
... return list(it)
>>> L = range(10)
>>> f(x for x in L)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(x for x in L, 1)
Traceback (most recent call last):
File "<doctest test.test_syntax[23]>", line 1
SyntaxError: Generator expression must be parenthesized if not sole argument
>>> f((x for x in L), 1)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... i244, i245, i246, i247, i248, i249, i250, i251, i252,
... i253, i254, i255)
Traceback (most recent call last):
File "<doctest test.test_syntax[25]>", line 1
SyntaxError: more than 255 arguments
The actual error cases counts positional arguments, keyword arguments,
and generator expression arguments separately. This test combines the
three.
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... (x for x in i244), i245, i246, i247, i248, i249, i250, i251,
... i252=1, i253=1, i254=1, i255=1)
Traceback (most recent call last):
File "<doctest test.test_syntax[26]>", line 1
SyntaxError: more than 255 arguments
>>> f(lambda x: x[0] = 3)
Traceback (most recent call last):
File "<doctest test.test_syntax[27]>", line 1
SyntaxError: lambda cannot contain assignment
The grammar accepts any test (basically, any expression) in the
keyword slot of a call site. Test a few different options.
>>> f(x()=2)
Traceback (most recent call last):
File "<doctest test.test_syntax[28]>", line 1
SyntaxError: keyword can't be an expression
>>> f(a or b=1)
Traceback (most recent call last):
File "<doctest test.test_syntax[29]>", line 1
SyntaxError: keyword can't be an expression
>>> f(x.y=1)
Traceback (most recent call last):
File "<doctest test.test_syntax[30]>", line 1
SyntaxError: keyword can't be an expression
More set_context():
>>> (x for x in x) += 1
Traceback (most recent call last):
File "<doctest test.test_syntax[31]>", line 1
SyntaxError: can't assign to generator expression
>>> None += 1
Traceback (most recent call last):
File "<doctest test.test_syntax[32]>", line 1
SyntaxError: cannot assign to None
>>> f() += 1
Traceback (most recent call last):
File "<doctest test.test_syntax[33]>", line 1
SyntaxError: can't assign to function call
Test continue in finally in weird combinations.
continue in a for loop under finally should be ok.
>>> def test():
... try:
... pass
... finally:
... for abc in range(10):
... continue
... print abc
>>> test()
9
Start simple, a continue in a finally should not be allowed.
>>> def test():
... for abc in range(10):
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
File "<doctest test.test_syntax[36]>", line 6
SyntaxError: 'continue' not supported inside 'finally' clause
This is essentially a continue in a finally which should not be allowed.
>>> def test():
... for abc in range(10):
... try:
... pass
... finally:
... try:
... continue
... except:
... pass
Traceback (most recent call last):
...
File "<doctest test.test_syntax[37]>", line 6
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
File "<doctest test.test_syntax[38]>", line 5
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
File "<doctest test.test_syntax[39]>", line 6
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try:
... pass
... finally:
... try:
... continue
... finally:
... pass
Traceback (most recent call last):
...
File "<doctest test.test_syntax[40]>", line 7
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try: pass
... finally:
... try:
... pass
... except:
... continue
Traceback (most recent call last):
...
File "<doctest test.test_syntax[41]>", line 8
SyntaxError: 'continue' not supported inside 'finally' clause
There is one test for a break that is not in a loop. The compiler
uses a single data structure to keep track of try-finally and loops,
so we need to be sure that a break is actually inside a loop. If it
isn't, there should be a syntax error.
>>> try:
... print 1
... break
... print 2
... finally:
... print 3
Traceback (most recent call last):
...
File "<doctest test.test_syntax[42]>", line 3
SyntaxError: 'break' outside loop
This should probably raise a better error than a SystemError (or none at all).
In 2.5 there was a missing exception and an assert was triggered in a debug
build. The number of blocks must be greater than CO_MAXBLOCKS. SF #1565514
>>> while 1:
... while 2:
... while 3:
... while 4:
... while 5:
... while 6:
... while 8:
... while 9:
... while 10:
... while 11:
... while 12:
... while 13:
... while 14:
... while 15:
... while 16:
... while 17:
... while 18:
... while 19:
... while 20:
... while 21:
... while 22:
... break
Traceback (most recent call last):
...
SystemError: too many statically nested blocks
This tests assignment-context; there was a bug in Python 2.5 where compiling
a complex 'if' (one with 'elif') would fail to notice an invalid suite,
leading to spurious errors.
>>> if 1:
... x() = 1
... elif 1:
... pass
Traceback (most recent call last):
...
File "<doctest test.test_syntax[44]>", line 2
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... x() = 1
Traceback (most recent call last):
...
File "<doctest test.test_syntax[45]>", line 4
SyntaxError: can't assign to function call
>>> if 1:
... x() = 1
... elif 1:
... pass
... else:
... pass
Traceback (most recent call last):
...
File "<doctest test.test_syntax[46]>", line 2
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... x() = 1
... else:
... pass
Traceback (most recent call last):
...
File "<doctest test.test_syntax[47]>", line 4
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... pass
... else:
... x() = 1
Traceback (most recent call last):
...
File "<doctest test.test_syntax[48]>", line 6
SyntaxError: can't assign to function call
>>> f(a=23, a=234)
Traceback (most recent call last):
...
File "<doctest test.test_syntax[49]>", line 1
SyntaxError: keyword argument repeated
>>> del ()
Traceback (most recent call last):
...
File "<doctest test.test_syntax[50]>", line 1
SyntaxError: can't delete ()
>>> {1, 2, 3} = 42
Traceback (most recent call last):
...
File "<doctest test.test_syntax[50]>", line 1
SyntaxError: can't assign to literal
"""
import re
import unittest
import warnings
from test import test_support
class SyntaxTestCase(unittest.TestCase):
    """Tests for SyntaxError conditions that are awkward to express as
    doctests (error-substring matching, expected subclasses, warnings)."""

    def _check_error(self, code, errtext,
                     filename="<testcase>", mode="exec", subclass=None):
        """Check that compiling code raises SyntaxError with errtext.

        errtext is a regular expression that must be present in the
        text of the exception raised.  If subclass is specified it
        is the expected subclass of SyntaxError (e.g. IndentationError).
        """
        try:
            compile(code, filename, mode)
        except SyntaxError, err:
            if subclass and not isinstance(err, subclass):
                self.fail("SyntaxError is not a %s" % subclass.__name__)
            mo = re.search(errtext, str(err))
            if mo is None:
                self.fail("%s did not contain '%r'" % (err, errtext,))
        else:
            # compile() succeeded -- that is itself a test failure here.
            self.fail("compile() did not raise SyntaxError")

    def test_paren_arg_with_default(self):
        # A parenthesized (tuple-unpacking style) parameter may not carry
        # a default value.
        self._check_error("def f((x)=23): pass",
                          "parenthesized arg with default")

    def test_assign_call(self):
        # A function call is not a valid assignment target.
        self._check_error("f() = 1", "assign")

    def test_assign_del(self):
        # A function call is not a valid 'del' target either.
        self._check_error("del f()", "delete")

    def test_global_err_then_warn(self):
        # Bug tickler: The SyntaxError raised for one global statement
        # shouldn't be clobbered by a SyntaxWarning issued for a later one.
        source = re.sub('(?m)^ *:', '', """\
            :def error(a):
            :    global a  # SyntaxError
            :def warning():
            :    b = 1
            :    global b  # SyntaxWarning
            :""")
        warnings.filterwarnings(action='ignore', category=SyntaxWarning)
        self._check_error(source, "global")
        # remove the filter installed above so later tests are unaffected
        warnings.filters.pop(0)

    def test_break_outside_loop(self):
        self._check_error("break", "outside loop")

    def test_delete_deref(self):
        # Deleting a variable that a nested scope references is illegal.
        source = re.sub('(?m)^ *:', '', """\
            :def foo(x):
            :  def bar():
            :    print x
            :  del x
            :""")
        self._check_error(source, "nested scope")

    def test_unexpected_indent(self):
        self._check_error("foo()\n bar()\n", "unexpected indent",
                          subclass=IndentationError)

    def test_no_indent(self):
        self._check_error("if 1:\nfoo()", "expected an indented block",
                          subclass=IndentationError)

    def test_bad_outdent(self):
        self._check_error("if 1:\n  foo()\n bar()",
                          "unindent does not match .* level",
                          subclass=IndentationError)

    def test_kwargs_last(self):
        self._check_error("int(base=10, '2')", "non-keyword arg")
def test_main():
    # Run the unittest-based checks first ...
    test_support.run_unittest(SyntaxTestCase)
    # ... then this module's own doctests; suppress the py3k warning the
    # backquote example in the module docstring would otherwise emit.
    from test import test_syntax
    with test_support.check_py3k_warnings(("backquote not supported",
                                           SyntaxWarning)):
        test_support.run_doctest(test_syntax, verbosity=True)


if __name__ == "__main__":
    test_main()
| gpl-3.0 |
hugs/django | tests/modeltests/reverse_lookup/models.py | 30 | 1891 | """
25. Reverse lookups
This demonstrates the reverse lookup features of the database API.
"""
from django.db import models
class User(models.Model):
    """A poll creator; referenced by ``Poll.creator``."""
    # Display name; also what __unicode__ renders.
    name = models.CharField(max_length=200)

    def __unicode__(self):
        return self.name
class Poll(models.Model):
    """A poll; reachable in reverse from User via ``poll__...`` lookups."""
    question = models.CharField(max_length=200)
    # No related_name given, so reverse lookups use the default "poll" name.
    creator = models.ForeignKey(User)

    def __unicode__(self):
        return self.question
class Choice(models.Model):
    """A choice with two FKs to Poll, each with an explicit related_name.

    The reverse accessors on Poll are "poll_choice" and "related_choice";
    the default "choice" name is therefore unavailable (see the doctests
    below).
    """
    name = models.CharField(max_length=100)
    poll = models.ForeignKey(Poll, related_name="poll_choice")
    related_poll = models.ForeignKey(Poll, related_name="related_choice")

    def __unicode__(self):
        return self.name
__test__ = {'API_TESTS':"""
>>> john = User(name="John Doe")
>>> john.save()
>>> jim = User(name="Jim Bo")
>>> jim.save()
>>> first_poll = Poll(question="What's the first question?", creator=john)
>>> first_poll.save()
>>> second_poll = Poll(question="What's the second question?", creator=jim)
>>> second_poll.save()
>>> new_choice = Choice(poll=first_poll, related_poll=second_poll, name="This is the answer.")
>>> new_choice.save()
>>> # Reverse lookups by field name:
>>> User.objects.get(poll__question__exact="What's the first question?")
<User: John Doe>
>>> User.objects.get(poll__question__exact="What's the second question?")
<User: Jim Bo>
>>> # Reverse lookups by related_name:
>>> Poll.objects.get(poll_choice__name__exact="This is the answer.")
<Poll: What's the first question?>
>>> Poll.objects.get(related_choice__name__exact="This is the answer.")
<Poll: What's the second question?>
>>> # If a related_name is given you can't use the field name instead:
>>> Poll.objects.get(choice__name__exact="This is the answer")
Traceback (most recent call last):
...
FieldError: Cannot resolve keyword 'choice' into field. Choices are: creator, id, poll_choice, question, related_choice
"""}
| bsd-3-clause |
liosha2007/temporary-groupdocs-python3-sdk | groupdocs/models/SetUserRolesResponse.py | 1 | 1152 | #!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class SetUserRolesResponse:
    """Response wrapper for the SetUserRoles API call.

    NOTE: Auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self):
        # Maps attribute name -> swagger type name, used for
        # (de)serialization by the generated client code.
        self.swaggerTypes = {
            'result': 'SetUserRolesResult',
            'status': 'str',
            'error_message': 'str',
            'composedOn': 'int'
        }
        # All declared attributes start out unset.
        for attribute in self.swaggerTypes:
            setattr(self, attribute, None)
| apache-2.0 |
waheedahmed/edx-platform | lms/djangoapps/lti_provider/users.py | 63 | 5258 | """
LTI user management functionality. This module reconciles the two identities
that an individual has in the campus LMS platform and on edX.
"""
import string
import random
import uuid
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.db import IntegrityError, transaction
from lti_provider.models import LtiUser
from student.models import UserProfile
def authenticate_lti_user(request, lti_user_id, lti_consumer):
    """
    Determine whether the user specified by the LTI launch has an existing
    account. If not, create a new Django User model and associate it with
    an LtiUser object.

    If the currently logged-in user does not match the user specified by
    the LTI launch, log out the old user and log in the LTI identity.
    """
    try:
        lti_user = LtiUser.objects.get(
            lti_user_id=lti_user_id,
            lti_consumer=lti_consumer
        )
    except LtiUser.DoesNotExist:
        # First launch from this LTI identity: provision an account.
        lti_user = create_lti_user(lti_user_id, lti_consumer)

    correct_user_active = (request.user.is_authenticated() and
                           request.user == lti_user.edx_user)
    if not correct_user_active:
        # Not authenticated, or logged in as somebody else: switch to
        # the LTI user.
        switch_user(request, lti_user, lti_consumer)
def create_lti_user(lti_user_id, lti_consumer):
    """
    Generate a new user on the edX platform with a random username and
    password, and associate that account with the LTI identity.
    """
    edx_password = str(uuid.uuid4())

    while True:
        edx_user_id = generate_random_edx_username()
        edx_email = "{}@{}".format(edx_user_id, settings.LTI_USER_EMAIL_DOMAIN)
        try:
            with transaction.atomic():
                edx_user = User.objects.create_user(
                    username=edx_user_id,
                    password=edx_password,
                    email=edx_email,
                )
                # A profile is required if PREVENT_CONCURRENT_LOGINS flag
                # is set.
                # TODO: We could populate user information from the LTI
                # launch here, but it's not necessary for our current uses.
                UserProfile(user=edx_user).save()
        except IntegrityError:
            # The random edx_user_id wasn't unique; loop around and retry
            # with a different random ID.
            continue
        break

    lti_user = LtiUser(
        lti_consumer=lti_consumer,
        lti_user_id=lti_user_id,
        edx_user=edx_user
    )
    lti_user.save()
    return lti_user
def switch_user(request, lti_user, lti_consumer):
    """
    Log out the current user, and log in using the edX identity associated
    with the LTI ID.
    """
    credentials = {
        "username": lti_user.edx_user.username,
        "lti_user_id": lti_user.lti_user_id,
        "lti_consumer": lti_consumer,
    }
    edx_user = authenticate(**credentials)
    if not edx_user:
        # This shouldn't happen, since we've created edX accounts for any
        # LTI users by this point, but just in case we can return a 403.
        raise PermissionDenied()
    login(request, edx_user)
def generate_random_edx_username():
    """
    Create a valid random edX user ID.

    The ID is exactly 30 characters long and contains upper- and lowercase
    letters and digits.

    Returns:
        str: the generated username.
    """
    allowable_chars = string.ascii_letters + string.digits
    # Use a single SystemRandom instance (instead of constructing one per
    # character) and a join (instead of quadratic string concatenation).
    rng = random.SystemRandom()
    return ''.join(rng.choice(allowable_chars) for _ in range(30))
class LtiBackend(object):
    """
    Django authentication backend that authenticates users via LTI.

    It only ever returns a User object that is linked to an LTI identity
    (i.e. an account created by the create_lti_user method above).
    """

    def authenticate(self, username=None, lti_user_id=None, lti_consumer=None):
        """
        Try to authenticate a user.

        Returns the Django user with this username if, and only if, a
        record linking that user to the given LTI user_id/consumer exists
        in the LtiUser collection; otherwise returns None, as required by
        the authentication backend specification.
        """
        try:
            edx_user = User.objects.get(username=username)
            LtiUser.objects.get(
                edx_user_id=edx_user.id,
                lti_user_id=lti_user_id,
                lti_consumer=lti_consumer
            )
        except (User.DoesNotExist, LtiUser.DoesNotExist):
            return None
        return edx_user

    def get_user(self, user_id):
        """
        Return the User object for a user that has already been
        authenticated by this backend, or None if it no longer exists.
        """
        try:
            return User.objects.get(id=user_id)
        except User.DoesNotExist:
            return None
| agpl-3.0 |
haskelladdict/sconcho | sconcho/gui/export_bitmap_dialog.py | 1 | 15506 | # -*- coding: utf-8 -*-
########################################################################
#
# (c) 2009-2013 Markus Dittrich
#
# This program is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License Version 3 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License Version 3 for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
#######################################################################
import math
import logging
from functools import partial
from PyQt4.QtCore import (QDir,
QFile,
QFileInfo,
Qt,
SIGNAL)
from PyQt4.QtGui import (QDialog,
QDialogButtonBox,
QFileDialog,
QImageWriter,
QMessageBox)
from sconcho.gui.ui_export_bitmap_dialog import Ui_ExportBitmapDialog
from sconcho.util.canvas import visible_bounding_rect
import sconcho.util.messages as msg
# module lever logger:
logger = logging.getLogger(__name__)
# global conversion factor: centimeters per inch (1 in = 2.54 cm;
# 0.393700787 is inches per centimeter)
inToCm = 1/0.393700787

# dictionary mapping lower-case file extension to a human readable
# description for the most common image file formats
imageFileFormats = { "png" : "Portable Networks Graphic",
                     "bmp" : "Bitmap Image File",
                     "ico" : "Ico File Format",
                     "jpeg" : "Joint Photographic Experts Group Format",
                     "jpg" : "Joint Photographic Experts Group Format",
                     "ppm" : "Netpbm Color Image Format",
                     "tif" : "Tagged Image File Format",
                     "tiff" : "Tagged Image File Format",
                     "xbm" : "X Bitmap Format",
                     "xpm" : "X PixMap Format",
                     "svg" : "Scalable Vector Graphics" }
##########################################################################
#
# This widget allows users to adjust to control exporting of the
# canvas to a bitmap
#
##########################################################################
class ExportBitmapDialog(QDialog, Ui_ExportBitmapDialog):
    def __init__(self, canvas, fileName = "Unknown", parent = None):
        """ Initialize the dialog. """

        super(ExportBitmapDialog, self).__init__(parent)
        self.setupUi(self)

        self._determine_image_formats()
        self._add_image_formats_to_gui()

        # seed the export path with the pattern's basename inside the
        # user's home directory
        if fileName:
            self.fullPath = QDir.homePath() + "/" + \
                            QFileInfo(fileName).baseName()
        else:
            self.fullPath = QDir.homePath() + "/"

        extension = self.selected_file_extension()
        self.fileNameEdit.setText(self.fullPath + "." + extension)

        # NOTE: This has to come first since we rely on the connected
        # slots to synchronize the widgets during the initializations below
        self._set_up_connections()

        self.canvas = canvas
        # index into unitSelector: 0 = pixels, 1 = inches, 2 = cm
        self.currentUnit = 0
        self.unitSelector.setCurrentIndex(self.currentUnit)

        self.defaultDPI = 300
        self.update_dimensions()
        self.dpiSpinner.setValue(self.defaultDPI)
def _set_up_connections(self):
""" Set up all the widget connections. """
# synchronize spin boxes
self.connect(self.imageWidthSpinner, SIGNAL("valueChanged(double)"),
self.imageWidth_update)
self.connect(self.imageHeightSpinner,
SIGNAL("valueChanged(double)"),
self.imageHeight_update)
self.connect(self.widthSpinner, SIGNAL("valueChanged(int)"),
self.width_update)
self.connect(self.heightSpinner, SIGNAL("valueChanged(int)"),
self.height_update)
self.connect(self.dpiSpinner, SIGNAL("valueChanged(int)"),
self.dpi_update)
self.connect(self.unitSelector, SIGNAL("currentIndexChanged(int)"),
self.unit_update)
self.connect(self.browseButton, SIGNAL("pressed()"),
self.open_file_selector)
self.connect(self.cancelButton, SIGNAL("pressed()"),
self.close)
self.connect(self.exportButton, SIGNAL("pressed()"),
self.accept)
self.connect(self.availableFormatsChooser,
SIGNAL("currentIndexChanged(int)"),
self.update_path_extension)
    def showEvent(self, event):
        """ We derive showEvent so we can refresh the stored canvas
        dimensions every time the dialog is shown.
        """

        self.update_dimensions()
        # delegate the actual show handling to the base class
        return QDialog.showEvent(self, event)
def update_dimensions(self):
""" Update values with the current canvas dimensions """
size = visible_bounding_rect(self.canvas.items())
imageWidth = math.floor(size.width())
imageHeight = math.floor(size.height())
self.imageWidth = imageWidth
self.imageHeight = imageHeight
self._aspectRatio = imageWidth/imageHeight
self.imageWidthSpinner.setValue(
int(self._convert_pixels_to_length(self.imageWidth)))
self.imageHeightSpinner.setValue(
int(self._convert_pixels_to_length(self.imageHeight)))
def _determine_image_formats(self):
""" Determine and store all image formats we can
support.
"""
self.formats = [str(formating, "utf-8") for \
formating in QImageWriter.supportedImageFormats()]
# we support svg format as well
self.formats.append("svg")
def _add_image_formats_to_gui(self):
""" This function lists all available formats on the gui
NOTE: We set png as the default for now.
"""
defaultIndex = 0
for (index, aFormat) in enumerate(self.formats):
if aFormat in imageFileFormats:
description = imageFileFormats[aFormat]
else:
description = "Image Format"
if aFormat == "png":
defaultIndex = index
formatStr = ("%s (*.%s)" % (description, aFormat))
self.availableFormatsChooser.insertItem(index, formatStr, aFormat)
# set the default
self.availableFormatsChooser.setCurrentIndex(defaultIndex)
def imageWidth_update(self, newWidth):
""" Update after image width change. """
self.imageWidth = self._convert_length_to_pixels(newWidth)
self.imageHeight = self.imageWidth/self._aspectRatio
height = self._convert_pixels_to_length(self.imageHeight)
dpi = self.dpiSpinner.value()
self._set_blocking_value(self.imageHeightSpinner, height)
self._set_blocking_value(self.widthSpinner,
self.imageWidth * dpi/self.defaultDPI)
self._set_blocking_value(self.heightSpinner,
self.imageHeight * dpi/self.defaultDPI)
def imageHeight_update(self, newHeight):
""" Update after image width change. """
self.imageHeight = self._convert_length_to_pixels(newHeight)
self.imageWidth = self.imageHeight * self._aspectRatio
width = self._convert_pixels_to_length(self.imageWidth)
dpi = self.dpiSpinner.value()
self._set_blocking_value(self.imageWidthSpinner, width)
self._set_blocking_value(self.widthSpinner,
self.imageWidth * dpi/self.defaultDPI)
self._set_blocking_value(self.heightSpinner,
self.imageHeight * dpi/self.defaultDPI)
def _convert_length_to_pixels(self, length):
""" Converts a length value in currentUnit to pixels """
# pixels
if self.currentUnit == 1:
length *= self.defaultDPI
elif self.currentUnit == 2:
length = length/inToCm * self.defaultDPI
return length
def _convert_pixels_to_length(self, length):
""" Converts a pixel value to length in currentUnit """
# pixels
if self.currentUnit == 1:
length /= self.defaultDPI
elif self.currentUnit == 2:
length = length/self.defaultDPI * inToCm
return length
def width_update(self, newWidth):
""" Update after width change. """
height = newWidth/self._aspectRatio
dpi = newWidth/self.imageWidth * self.defaultDPI
self._set_blocking_value(self.heightSpinner, height)
self._set_blocking_value(self.dpiSpinner, dpi)
def height_update(self, newHeight):
""" Update after height change. """
width = newHeight * self._aspectRatio
dpi = width/self.imageWidth * self.defaultDPI
self._set_blocking_value(self.widthSpinner, width)
self._set_blocking_value(self.dpiSpinner, dpi)
def dpi_update(self, newDPI):
""" Update after dpi change. """
width = newDPI/self.defaultDPI * self.imageWidth
height = width/self._aspectRatio
self._set_blocking_value(self.heightSpinner, height)
self._set_blocking_value(self.widthSpinner, width)
def unit_update(self, newUnit):
""" Update after unit change. """
self.currentUnit = newUnit
# pixels
if newUnit == 0:
self._set_blocking_value(self.imageWidthSpinner,
self.imageWidth)
self._set_blocking_value(self.imageHeightSpinner,
self.imageHeight)
# inches
elif newUnit == 1:
self._set_blocking_value(self.imageWidthSpinner,
self.imageWidth/self.defaultDPI)
self._set_blocking_value(self.imageHeightSpinner,
self.imageHeight/self.defaultDPI)
# cm
elif newUnit == 2:
self._set_blocking_value(self.imageWidthSpinner,
self.imageWidth/self.defaultDPI*inToCm)
self._set_blocking_value(self.imageHeightSpinner,
self.imageHeight/self.defaultDPI*inToCm)
    def _set_blocking_value(self, theObject, value):
        """ Helper function for setting selector values.

        Blocks the widget's signals while setting the value so the
        programmatic change does not trigger the valueChanged handlers
        and cause an infinite update loop between the spinners.
        """
        theObject.blockSignals(True)
        theObject.setValue(value)
        theObject.blockSignals(False)
def update_path_extension(self, selectionID):
""" This function updates the filename extension
if the user changes the image file format.
"""
selectedExtension = \
self.availableFormatsChooser.itemData(selectionID)
self.fileNameEdit.setText(self.fullPath + "." + selectedExtension)
def update_export_path(self, filePath):
""" This function is called if the export file
name has changed.
If the filePath has a valid image file format extension
we keep it otherwise we use the currently selected one.
"""
basename = QFileInfo(filePath).completeBaseName()
path = QFileInfo(filePath).absolutePath()
self.fullPath = \
QFileInfo(path + "/" + basename).absoluteFilePath()
extension = QFileInfo(filePath).suffix()
if extension in self.formats:
for index in range(self.availableFormatsChooser.count()):
item = self.availableFormatsChooser.itemData(index)
if item == extension:
self.availableFormatsChooser.setCurrentIndex(index)
break
else:
extension = self.selected_file_extension()
self.fileNameEdit.setText(self.fullPath + "." + extension)
def open_file_selector(self):
""" Open a file selector and ask for the name """
formatStr = "All Files (*.*)"
for aFormat in self.formats:
if aFormat in imageFileFormats:
description = imageFileFormats[aFormat]
else:
description = "Image Format"
formatStr = ("%s;; %s ( *.%s)" % (formatStr, description,
aFormat))
defaultPath = self.fileNameEdit.text()
if not defaultPath:
defaultPath = QDir.homePath()
exportFilePath = QFileDialog.getSaveFileName(self,
msg.exportPatternTitle,
defaultPath,
formatStr,
QFileDialog.DontConfirmOverwrite)
if exportFilePath:
self.update_export_path(exportFilePath)
    def accept(self):
        """ Validate the export settings before closing the dialog.

        Warns when no path was entered, opens the file selector when
        only a directory was given, fills in a missing extension from
        the format chooser, and asks for confirmation before
        overwriting an existing file.  On success the export_pattern
        signal is emitted with width/height/dpi and the final path.
        """
        exportFilePath = self.fileNameEdit.text()
        if not exportFilePath:
            logger.warn(msg.noFilePathText)
            QMessageBox.warning(self, msg.noFilePathTitle,
                                msg.noFilePathText,
                                QMessageBox.Close)
            return
        # check if a filename was provided - if not we open the
        # file dialog
        if not QFileInfo(exportFilePath).baseName():
            self.open_file_selector()
            return
        # check the extension; if none is present we use the one
        # selected in the format combo box
        extension = QFileInfo(exportFilePath).suffix()
        if extension not in self.formats:
            exportFilePath += ("." + self.selected_file_extension())
        # if file exists issue a warning as well
        if QFile(exportFilePath).exists():
            saveFileName = QFileInfo(exportFilePath).fileName()
            messageBox = QMessageBox.question(self,
                              msg.imageFileExistsTitle,
                              msg.imageFileExistsText % saveFileName,
                              QMessageBox.Ok | QMessageBox.Cancel)
            if (messageBox == QMessageBox.Cancel):
                return
        # provide the io subroutines with the relevant info
        width = self.widthSpinner.value()
        height = self.heightSpinner.value()
        dpi = self.dpiSpinner.value()
        self.emit(SIGNAL("export_pattern"), width, height, dpi,
                  exportFilePath)
        QDialog.accept(self)
def selected_file_extension(self):
""" Returns a string with the currently selected image file
extension.
"""
extensionID = \
self.availableFormatsChooser.currentIndex()
selectedExtension = \
str(self.availableFormatsChooser.itemData(extensionID))
return selectedExtension
def keyPressEvent(self, event):
""" We catch the return key so we don't open
up the browse menu.
"""
if event.key() == Qt.Key_Return:
return
QDialog.keyPressEvent(self, event)
| gpl-3.0 |
renpytom/python-for-android | src/jni/sdl2_ttf/external/freetype-2.4.12/src/tools/docmaker/utils.py | 515 | 3063 | # Utils (c) 2002, 2004, 2007, 2008 David Turner <david@freetype.org>
#
import string, sys, os, glob
# Current output directory for the generated documentation.  None (the
# default) means files are written to the current working directory;
# callers set it before open_output()/check_output() are used.
#
output_dir = None
# This function is used to sort the index. It is a simple lexicographical
# sort, except that it places capital letters before lowercase ones.
#
# This function is used to sort the index.  It is a simple lexicographical
# sort, except that it places capital letters before lowercase ones.
#
# Note: uses the str.lower() method instead of string.lower(); the
# string-module function form was deprecated in Python 2 and removed in
# Python 3, while the method behaves identically on both.
#
def index_sort( s1, s2 ):
    """cmp-style comparison: case-insensitive order first, capitals
    before lowercase on ties; empty strings sort first."""
    if not s1:
        return -1

    if not s2:
        return 1

    l1 = len( s1 )
    l2 = len( s2 )
    m1 = s1.lower()
    m2 = s2.lower()

    for i in range( l1 ):
        if i >= l2 or m1[i] > m2[i]:
            return 1

        if m1[i] < m2[i]:
            return -1

        # same letter ignoring case: capitals win
        if s1[i] < s2[i]:
            return -1

        if s1[i] > s2[i]:
            return 1

    if l2 > l1:
        return -1

    return 0
# Sort input_list, placing the elements of order_list in front.
#
# Sort input_list, placing the elements of order_list in front.
#
def sort_order_list( input_list, order_list ):
    """Return order_list followed by the input_list entries that are
    not already in order_list (in their original order)."""
    extras = [element for element in input_list if element not in order_list]
    return order_list[:] + extras
# Open the standard output to a given project documentation file. Use
# "output_dir" to determine the filename location if necessary and save the
# old stdout in a tuple that is returned by this function.
#
# Open the standard output to a given project documentation file.  Use
# "output_dir" to determine the filename location if necessary and save the
# old stdout in a tuple that is returned by this function.
#
def open_output( filename ):
    """Redirect sys.stdout to `filename` (placed inside output_dir when
    one is set) and return (new_file, previous_stdout) for close_output."""
    global output_dir

    if output_dir and output_dir != "":
        filename = output_dir + os.sep + filename

    previous_stdout = sys.stdout
    new_file = open( filename, "w" )
    sys.stdout = new_file

    return ( new_file, previous_stdout )
# Close the output that was returned by "close_output".
#
# Close the output that was returned by "open_output".
#
def close_output( output ):
    """Close the file opened by open_output and restore sys.stdout."""
    new_file, previous_stdout = output
    new_file.close()
    sys.stdout = previous_stdout
# Check output directory.
#
# Check output directory.
#
def check_output():
    """Validate the output_dir global; exit(2) when it names a
    non-existing directory."""
    global output_dir

    if not output_dir:
        return

    if output_dir == "":
        # NOTE(review): unreachable in practice, "" is falsy; kept for
        # parity with the original logic.
        output_dir = None
        return

    if not os.path.isdir( output_dir ):
        sys.stderr.write( "argument" + " '" + output_dir + "' " + \
                          "is not a valid directory" )
        sys.exit( 2 )
def file_exists( pathname ):
    """Check that `pathname` names a readable file.

    Returns 1 on success and None on failure (after printing a
    diagnostic to stderr), mirroring the C-style convention the
    callers expect."""
    try:
        probe = open( pathname, "r" )
        probe.close()
    except:
        sys.stderr.write( pathname + " couldn't be accessed\n" )
        return None

    return 1
def make_file_list( args = None ):
    """Build a list of input files from command-line arguments.

    Glob patterns are expanded and sorted (the processing order of the
    files matters to the callers).  Files that cannot be read are
    dropped (file_exists prints a diagnostic).  Returns None when the
    resulting list is empty.

    Fixes: string.find() was removed in Python 3 -- use the str.find
    method; wrap filter() in list() since it returns an iterator on
    Python 3.  Both forms behave identically on Python 2.
    """
    file_list = []
    # sys.stderr.write( repr( sys.argv[1 :] ) + '\n' )

    if not args:
        args = sys.argv[1 :]

    for pathname in args:
        if pathname.find( '*' ) >= 0:
            newpath = glob.glob( pathname )
            newpath.sort()  # sort files -- this is important because
                            # of the order of files
        else:
            newpath = [pathname]

        file_list.extend( newpath )

    if len( file_list ) == 0:
        file_list = None
    else:
        # now filter the file list to remove non-existing ones
        file_list = list( filter( file_exists, file_list ) )

    return file_list
# eof
| lgpl-2.1 |
2ndQuadrant/ansible | lib/ansible/modules/cloud/google/gcp_compute_global_address_facts.py | 9 | 6130 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_global_address_facts
description:
- Gather facts for GCP GlobalAddress
short_description: Gather facts for GCP GlobalAddress
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters.)
- Each additional filter in the list will act be added as an AND condition (filter1
and filter2) .
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: " a global address facts"
gcp_compute_global_address_facts:
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: facts
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
address:
description:
- The static external IP address represented by this resource.
returned: success
type: str
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource. This identifier is defined by the
server.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
ipVersion:
description:
- The IP Version that will be used by this address. Valid options are `IPV4`
or `IPV6`. The default value is `IPV4`.
returned: success
type: str
region:
description:
- A reference to the region where the regional address resides.
returned: success
type: str
addressType:
description:
- The type of the address to reserve, default is EXTERNAL.
- "* EXTERNAL indicates public/external single IP address."
- "* INTERNAL indicates internal IP ranges belonging to some network."
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
    """Entry point: fetch the matching global addresses and exit the
    module with the facts."""
    module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str')))

    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    response = fetch_list(module, collection(module), query_options(module.params['filters']))
    # normalize to a plain list; the API omits 'items' when nothing matched
    found = response.get('items') or []
    module.exit_json(**{'items': found})
def collection(module):
    """Return the globalAddresses collection URL for the module's project."""
    base = "https://www.googleapis.com/compute/v1/projects/{project}/global/addresses"
    return base.format(**module.params)
def fetch_list(module, link, query):
    """GET `link` with the given filter query and return the decoded body."""
    session = GcpSession(module, 'compute')
    response = session.get(link, params={'filter': query})
    return return_if_object(module, response)
def query_options(filters):
    """Combine the filter list into a single space-separated query string.

    Each filter of a multi-filter list is wrapped in parentheses so the
    clauses combine as AND conditions; a single filter is passed
    through unchanged.
    """
    if not filters:
        return ''
    if len(filters) == 1:
        return filters[0]
    wrapped = []
    for clause in filters:
        # NOTE(review): this tests "neither starts with ( nor ends with )",
        # so a clause with only one of the two is passed through as-is --
        # preserved from the original, confirm intent upstream.
        if clause[0] != '(' and clause[-1] != ')':
            wrapped.append("(%s)" % clause)
        else:
            wrapped.append(clause)
    return ' '.join(wrapped)
def return_if_object(module, response):
    """Decode a JSON API response, failing the module on errors.

    Returns None for 404 (not found) and 204 (no content) responses.
    """
    if response.status_code in (404, 204):
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
if __name__ == "__main__":
main()
| gpl-3.0 |
Magicjarvis/relay-backend | relay/views/login.py | 1 | 2682 | # -*- coding: utf-8 -*-
from flask import request
from relay import app
from relay.decorators import jsonify
from relay.decorators import session_required
from relay.models.users import add_user
from relay.models.users import get_user
from relay.auth import generate_session_id
from relay.auth import verify_password
from relay.util import sanitize_username
from relay.views.relays import queue_relay
# todo: add logout, should sessions->users? we always have the user right?
@app.route('/login', methods=['POST'])
@jsonify
def login_user():
    """Authenticate a user and hand out (and persist) a session token.

    Expects form fields `username` and `password`, plus an optional
    `gcm_id` to register the device for push messages.  Responds with
    {'session': token}; token is None when authentication fails.

    Fix: removed the dead local assignment `result = session_token`
    (the variable was never read).
    """
    username = sanitize_username(request.form['username'])
    password = request.form['password']
    gcm_id = request.form.get('gcm_id')
    user = get_user(username)
    session_token = None
    if user and verify_password(password, user.password):
        session_token = generate_session_id()
        if session_token not in user.session_tokens:
            user.session_tokens.append(session_token)
        if gcm_id and gcm_id not in user.gcm_ids:
            user.gcm_ids.append(gcm_id)
        user.put()
    return {'session': session_token}
@app.route('/register', methods=['POST'])
@jsonify
def register_user():
    """Create a new account and return a session token for it.

    The name the user typed is kept as the display name; a sanitized
    version is used as the unique key so collisions are easy to find.
    Responds with {'session': None} when the username is taken or
    creation fails.
    """
    display_name = request.form['username']
    username = sanitize_username(display_name)
    password = request.form['password']
    email = request.form['email']
    gcm_id = request.form.get('gcm_id')

    result = None
    if not get_user(username):
        session_token = generate_session_id()
        created = add_user(
            username,
            password,
            email,
            gcm_id=gcm_id,
            session_token=session_token
        )
        if created:
            result = session_token
            send_starter_relays(username)
    return {'session': result}
def send_starter_relays(username):
    """Queue the starter relay(s) that every fresh account receives."""
    starter_links = ['http://imgur.com/b6sQSIU']
    for starter_link in starter_links:
        queue_relay(starter_link, 'relay', username)
@app.route('/logout', methods=['POST'])
@jsonify
@session_required
def logout(user=None):
    """Drop the caller's session token and GCM registration, then
    persist the user record."""
    if not user:
        return {'success': False}
    # enforce later
    _unregister_session(user, request.headers.get('Authorization'))
    _unregister_gcm(user, request.form.get('gcm_id'))
    return {'success': user.put() is not None}
def _unregister_session(user, session_token):
    """Remove `session_token` from the user's sessions and de-duplicate
    the remaining list (order is not preserved)."""
    if session_token not in user.session_tokens:
        return
    user.session_tokens.remove(session_token)
    user.session_tokens = list(set(user.session_tokens))
def _unregister_gcm(user, gcm_id):
    """Remove `gcm_id` from the user's device registrations and
    de-duplicate the remaining list (order is not preserved)."""
    if gcm_id not in user.gcm_ids:
        return
    user.gcm_ids.remove(gcm_id)
    user.gcm_ids = list(set(user.gcm_ids))
| apache-2.0 |
jostep/tensorflow | tensorflow/contrib/__init__.py | 7 | 3514 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""contrib module containing volatile or experimental code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Add projects here, they will show up under tf.contrib.
from tensorflow.contrib import bayesflow
from tensorflow.contrib import cloud
from tensorflow.contrib import cluster_resolver
from tensorflow.contrib import compiler
from tensorflow.contrib import copy_graph
from tensorflow.contrib import crf
from tensorflow.contrib import cudnn_rnn
from tensorflow.contrib import data
from tensorflow.contrib import deprecated
from tensorflow.contrib import distributions
from tensorflow.contrib import factorization
from tensorflow.contrib import framework
from tensorflow.contrib import graph_editor
from tensorflow.contrib import grid_rnn
from tensorflow.contrib import image
from tensorflow.contrib import input_pipeline
from tensorflow.contrib import integrate
from tensorflow.contrib import keras
from tensorflow.contrib import kernel_methods
from tensorflow.contrib import labeled_tensor
from tensorflow.contrib import layers
from tensorflow.contrib import learn
from tensorflow.contrib import legacy_seq2seq
from tensorflow.contrib import linalg
from tensorflow.contrib import linear_optimizer
from tensorflow.contrib import lookup
from tensorflow.contrib import losses
from tensorflow.contrib import memory_stats
from tensorflow.contrib import metrics
from tensorflow.contrib import nccl
from tensorflow.contrib import nn
from tensorflow.contrib import opt
from tensorflow.contrib import predictor
from tensorflow.contrib import quantization
from tensorflow.contrib import reduce_slice_ops
from tensorflow.contrib import resampler
from tensorflow.contrib import rnn
from tensorflow.contrib import saved_model
from tensorflow.contrib import seq2seq
from tensorflow.contrib import signal
from tensorflow.contrib import slim
from tensorflow.contrib import solvers
from tensorflow.contrib import sparsemax
from tensorflow.contrib import staging
from tensorflow.contrib import stat_summarizer
from tensorflow.contrib import stateless
from tensorflow.contrib import tensor_forest
from tensorflow.contrib import tensorboard
from tensorflow.contrib import testing
from tensorflow.contrib import tfprof
from tensorflow.contrib import timeseries
from tensorflow.contrib import tpu
from tensorflow.contrib import training
from tensorflow.contrib import util
from tensorflow.contrib.ndlstm import python as ndlstm
from tensorflow.contrib.remote_fused_graph import pylib as remote_fused_graph
from tensorflow.contrib.specs import python as specs
from tensorflow.python.util.lazy_loader import LazyLoader
ffmpeg = LazyLoader("ffmpeg",
globals(), "tensorflow.contrib.ffmpeg")
del LazyLoader
del absolute_import
del division
del print_function
| apache-2.0 |
cwi-dis/igor | igor/access/consistency.py | 1 | 24436 | import random
# When True the consistency checkers print progress to stdout as well as
# accumulating it in their status string.
VERBOSE=False
OWN_NAMESPACE="http://jackjansen.nl/igor/owner" # Shouldn't be here, really: duplicated from the access module.
class CannotFix(Exception):
    """Raised when a consistency problem is found that the checker
    cannot repair automatically (or fixing is disabled)."""
    pass
class StructuralConsistency:
    """Checks (and optionally repairs) the structural invariants of the
    Igor XML database: namespace declarations, plugin ownership, and
    the fixed tree of infrastructure elements.

    Results are accumulated in `self.status`; `self.nChanges` counts
    repairs made, `self.nErrors` counts remaining problems.  CannotFix
    is raised when a problem cannot be repaired automatically.
    """

    def __init__(self, igor, fix, namespaces, token, extended=False):
        # fix: when True, attempt to repair problems instead of only
        # reporting them.  token: access token used for all DB accesses.
        self.igor = igor
        self.database = self.igor.database
        self.fix = fix
        self.namespaces = namespaces
        self.token = token
        self.extended = extended
        self.status = ''
        self.nChanges = 0
        self.nErrors = 0

    def _status(self, msg, isError=True):
        """Append a line to the status report; count it as an error
        unless isError=False."""
        if VERBOSE:
            print(msg)
        self.status += msg + '\n'
        if isError:
            self.nErrors += 1

    def _checkExists(self, path, dontfix=False, context=None):
        """Check that `path` matches at least one element; create an
        empty element there when fixing is enabled (and dontfix is not
        set).  A string `context` is first resolved to a singleton
        element."""
        if VERBOSE:
            print(f'consistency._checkExists({path})')
        if type(context) == type(''):
            contextElements = self.database.getElements(context, 'get', self.token, namespaces=self.namespaces)
            if len(contextElements) != 1:
                self._status(f'Non-singleton context: {context}')
                raise CannotFix
            context = contextElements[0]
        allElements = self.database.getElements(path, 'get', self.token, namespaces=self.namespaces, context=context)
        if len(allElements) == 0:
            if self.fix and not dontfix:
                parentPath, tag = self.database.splitXPath(path, allowNamespaces=True)
                parentElements = self.database.getElements(parentPath, 'post', self.token, namespaces=self.namespaces)
                if len(parentElements) != 1:
                    self._status(f'Cannot create element: non-singleton parent {parentPath}')
                    raise CannotFix
                parentElement = parentElements[0]
                # au:-prefixed tags must be created in the au namespace
                if tag[:3] == 'au:':
                    newElement = self.database.elementFromTagAndData(tag[3:], '', namespace=self.namespaces)
                else:
                    newElement = self.database.elementFromTagAndData(tag, '')
                parentElement.appendChild(newElement)
                self.database.setChanged()
                self.nChanges += 1
                self._status(f'Created: {path}', isError=False)
            else:
                self._status(f'Missing: {path}')

    def _checkUnique(self, path, dontfix=False, context=None):
        """Check that `path` matches at most one element.  Removing the
        extra elements is not implemented, so fixing raises CannotFix."""
        if VERBOSE:
            print(f'consistency._checkUnique({path})')
        if type(context) == type(''):
            contextElements = self.database.getElements(context, 'get', self.token, namespaces=self.namespaces)
            if len(contextElements) != 1:
                self._status(f'Non-singleton context: {context}')
                raise CannotFix
            context = contextElements[0]
        allElements = self.database.getElements(path, 'get', self.token, namespaces=self.namespaces, context=context)
        if len(allElements) > 1:
            if self.fix and not dontfix:
                self._status(f'Cannot fix yet: should remove additional {path}')
                raise CannotFix # Not yet implemented
            else:
                self._status(f'Non-unique: {path}')

    def _checkSingleton(self, path1, path2, dontfix=False, context=None):
        """Check that path1/path2 exists and is unique."""
        if VERBOSE:
            print(f'consistency._checkSingleton({path1}, {path2})')
        self._checkExists(path1 + '/' + path2, dontfix=dontfix, context=context)
        self._checkUnique(path1 + '/' + path2, dontfix=dontfix, context=context)
        # if self.extended:
        #    self._checkUnique('//' + path2, dontfix=dontfix, context=context)

    def _getAllElements(self, path):
        """Return all elements matching `path` (read access)."""
        rv = self.database.getElements(path, 'get', self.token, namespaces=self.namespaces)
        if VERBOSE:
            print(f'consistency._getAllElements({path}) returns {len(rv)} items')
        return rv

    def _getValues(self, path, context=None):
        """Return the values (not the paths) matching `path`; a string
        `context` is first resolved to a singleton element."""
        if type(context) == type(''):
            contextElements = self.database.getElements(context, 'get', self.token, namespaces=self.namespaces)
            if len(contextElements) != 1:
                self._status(f'Non-singleton context: {context}')
                raise CannotFix
            context = contextElements[0]
        return [x[1] for x in self.database.getValues(path, token=self.token, namespaces=self.namespaces, context=context)]

    def _getValue(self, path, context=None):
        """Return the single value matching `path`, None when there is
        none, and raise CannotFix when there are several."""
        values = self._getValues(path, context=context)
        if len(values) == 0:
            return None
        if len(values) == 1:
            return values[0]
        if context and type(context) != type(''):
            context = self.database.getXPathForElement(context)
        self._status(f'Non-unique value: {path} (context={context})')
        raise CannotFix

    def _checkInfrastructureItem(self, path, item):
        """Recursively check one entry of the databaseTemplate tree:
        item is (tag, subitem, subitem, ...)."""
        itemTag = item[0]
        itemContent = item[1:]
        itemPath = path + itemTag
        self._checkExists(itemPath)
        self._checkUnique(itemPath)
        for subItem in itemContent:
            self._checkInfrastructureItem(itemPath, subItem)

    def _checkNamespaces(self):
        """Ensure the /data root element declares every namespace in
        self.namespaces; add missing declarations when fixing."""
        rootElements = self.database.getElements('/data', 'get', token=self.token)
        if len(rootElements) != 1:
            self._status('Multiple /data root elements')
            raise CannotFix
        rootElement = rootElements[0]
        for nsName, nsUrl in list(self.namespaces.items()):
            have = rootElement.getAttribute('xmlns:' + nsName)
            if have != nsUrl:
                if self.fix:
                    rootElement.setAttribute('xmlns:' + nsName, nsUrl)
                    self._status(f'Added namespace declaration for xmlns:{nsName}={nsUrl}', isError=False)
                    self.database.setChanged()
                    self.nChanges += 1
                else:
                    self._status(f'Missing namespace declaration xmlns:{nsName}={nsUrl}')
                    raise CannotFix

    def _checkPlugins(self):
        """Cross-check own:plugin attributes against the installed
        plugins; delete (when fixing) elements owned by plugins that
        are no longer installed, and warn about plugins that own
        nothing."""
        installedPlugins = set(self.igor.plugins.list())
        allPluginOwnedElements = self.database.getElements('//*[@own:plugin]', 'get', token=self.token)
        mentionedPlugins = set()
        for elt in allPluginOwnedElements:
            owner = elt.getAttributeNS(OWN_NAMESPACE, "plugin")
            if not owner:
                continue
            if owner in installedPlugins:
                mentionedPlugins.add(owner)
                continue
            # The plugin owning this element doesn't exist
            xp = self.database.getXPathForElement(elt)
            if self.fix:
                self.database.delValues(xp, self.token)
                self.database.setChanged()
                self._status(f'Deleted {xp}, belonged to missing plugin {owner}', isError=False)
                self.nChanges += 1
            else:
                self._status(f'Missing plugin "{owner}" owns {xp}')
        for plugin in installedPlugins:
            if plugin in mentionedPlugins:
                continue
            self._status(f'Warning: plugin "{plugin}" does not own anything in the database', isError=False)

    def do_check(self):
        """Run all structural checks; raises CannotFix when a problem
        cannot be repaired.  Returns (nChanges, nErrors, status)."""
        # The fixed tree of elements every Igor database must contain:
        # each entry is (tag, child-entry, child-entry, ...).
        databaseTemplate = (
            '/data',
                ('/environment',
                    ('/systemHealth',
                        ('/messages',),
                    ),
                ),
                ('/status',
                    ('/igor',),
                    ('/sensors',),
                    ('/devices',),
                    ('/services',),
                ),
                ('/sensors',),
                ('/devices',),
                ('/services',
                    ('/igor',),
                ),
                ('/people',),
                ('/identities',
                    ('/admin',)
                ),
                ('/actions',),
                ('/sandbox',),
                ('/plugindata',),
            )
        if VERBOSE:
            self._status('Starting infrastructure consistency check', isError=False)
        try:
            #
            # Very first check: see whether we have the correct namespace declarations
            #
            self._checkNamespaces()
            #
            # Check plugins and corresponding own:plugin attributes
            self._checkPlugins()
            #
            # Now check that we have all the needed infrastructural items
            #
            self._checkInfrastructureItem('', databaseTemplate)
        except CannotFix:
            self._status('* Infrastructure consistency check failed', isError=False)
            raise
        #
        # Check that all users are unique and have plugindata entries
        #
        for userElement in self._getAllElements('/data/identities/*'):
            userName = userElement.tagName
            if ':' in userName or '{' in userName:
                continue # This is not a user but a capability
            self._checkUnique(userName, context='/data/identities', dontfix=True)
            self._checkSingleton(f'/data/identities/{userName}', 'plugindata')
        #
        # Now check that all plugins exist
        #
        # xxxjack to be done
        self._status('Infrastructure consistency check finished', isError=False)
        return self.nChanges, self.nErrors, self.status

    def check(self):
        """Like do_check(), but catches CannotFix, appends the final
        counters to the report, and resets the status buffer."""
        try:
            self.do_check()
        except CannotFix:
            self._status('* No further fixes attempted', isError=False)
        if self.nChanges:
            self._status('Number of changes made to database: %d' % self.nChanges, isError=False)
        if self.nErrors:
            self._status('Number of errors remaining: %d' % self.nErrors, isError=False)
        rv = self.status
        self.status = ''
        return self.nChanges, self.nErrors, rv
class CapabilityConsistency(StructuralConsistency):
    def _hasCapability(self, location, **kwargs):
        """Check that exactly one capability with the given field
        values exists under `location`; create it when fixing.

        When a `cid` is given and the capability is found elsewhere (or
        with different content), this is unfixable and CannotFix is
        raised.  Duplicates are only reported.
        """
        # Build an XPath that matches a capability with all requested fields.
        expr = location + '/au:capability'
        for k, v in list(kwargs.items()):
            subExpr = f"[{k}='{v}']"
            expr += subExpr
        allCaps = self.database.getElements(expr, 'get', token=self.token, namespaces=self.namespaces)
        if len(allCaps) == 0:
            # If this is a standard capability check whether it exists with incorrect settings
            if 'cid' in kwargs:
                allCaps = self.database.getElements("//au:capability[cid='{}']".format(kwargs['cid']), 'get', token=self.token, namespaces=self.namespaces)
                if len(allCaps):
                    self._status(f'Standard capability {expr} is in wrong place or has wrong content')
                    raise CannotFix
            if self.fix:
                self._createCapability(location, kwargs)
                self._status(f'Fixed: Missing standard capability {expr}', isError=False)
            else:
                self._status(f'Missing standard capability {expr}')
        elif len(allCaps) > 1:
            self._status(f'Duplicate standard capability {expr}')
    def _createCapability(self, location, content):
        """Create a capability element with fields `content` under the
        singleton element at `location`.

        A random cid is generated when none is given, and (except for
        the root capability) the new cid is registered as a child of
        its parent capability.  Raises CannotFix when the destination
        or the parent is not a singleton.
        """
        if not 'cid' in content:
            content['cid'] = 'c%d' % random.getrandbits(64)
        if content['cid'] != 'root' and not 'parent' in content:
            content['parent'] = 'root'
        newElement = self.database.elementFromTagAndData('capability', content, namespace=self.namespaces)
        parentElements = self.database.getElements(location, 'post', token=self.token, namespaces=self.namespaces)
        if len(parentElements) != 1:
            self._status(f'Cannot create capability: non-singleton destination {location}')
            raise CannotFix
        parentElement = parentElements[0]
        parentElement.appendChild(newElement)
        self.database.setChanged()
        self.nChanges += 1
        # The root capability has no parent to link back to.
        if content['cid'] == 'root':
            return
        # Update parent, if needed
        parentCid = content['parent']
        parent = self._getAllElements(f"//au:capability[cid='{parentCid}']")
        if len(parent) != 1:
            self._status(f'Cannot update parent capability: Multiple capabilities with cid={parentCid}')
            raise CannotFix
        parent = parent[0]
        parent.appendChild(self.database.elementFromTagAndData('child', content['cid']))
        self.database.setChanged()
    def _fixParentCapability(self, cap, cid):
        """Re-attach an orphaned capability `cap` (with id `cid`) to the
        root capability: add a <parent> to cap and a matching <child>
        to root.  Raises CannotFix when the root capability is not
        unique."""
        parentCid = 'root'
        cap.appendChild(self.database.elementFromTagAndData('parent', parentCid))
        self.database.setChanged()
        parent = self._getAllElements(f"//au:capability[cid='{parentCid}']")
        if len(parent) != 1:
            self._status(f'Cannot update parent capability: Multiple capabilities with cid={parentCid}')
            raise CannotFix
        parent = parent[0]
        parent.appendChild(self.database.elementFromTagAndData('child', cid))
        self.database.setChanged()
        self.nChanges += 1
def _getTokensNeededByElement(self, element, optional=False):
"""Return a list of dictionaries describing the tokens this element needs"""
nodelist = self.database.getElements("au:needCapability", 'get', self.token, context=element, namespaces=self.namespaces)
if optional:
nodelist += self.database.getElements("au:mayNeedCapability", 'get', self.token, context=element, namespaces=self.namespaces)
tokenDataList = [self.igor.database.tagAndDictFromElement(e)[1] for e in nodelist]
return tokenDataList
    def do_check(self):
        """Run the full capability consistency check (and, when self.fix is
        set, apply the repairs that are safe to automate).

        Performs, in order: (1) existence of the access-control containers,
        (2) presence of all default/important capabilities, (3) verification
        that the capability parent/child links form a tree, and (4) that every
        capability lives in an expected location. Raises CannotFix for
        problems that cannot be repaired automatically.
        """
        StructuralConsistency.do_check(self)
        if VERBOSE:
            self._status('Starting capability consistency check', isError=False)
        #
        # First set of checks: determine that the infrastructure needed by the capabilities exists
        #
        self._checkSingleton('/data', 'au:access')
        self._checkSingleton('/data/au:access', 'au:defaultCapabilities')
        self._checkSingleton('/data/au:access', 'au:exportedCapabilities')
        self._checkSingleton('/data/au:access', 'au:revokedCapabilities')
        self._checkSingleton('/data/au:access', 'au:unusedCapabilities')
        self._checkSingleton('/data/au:access', 'au:sharedKeys')
        self._checkExists('/data/identities/admin')
        self._checkUnique('/data/identities/admin')
        #
        # Second set - all the default and important capabilities exist
        #
        self._hasCapability('/data/identities/admin', cid='root')
        self._hasCapability('/data/identities/admin', cid='external', delegate='external')
        self._hasCapability('/data/au:access/au:defaultCapabilities', cid='default-static', obj='/static', get='child')
        self._hasCapability('/data/au:access/au:defaultCapabilities', cid='default-environment', obj='/data/environment', get='descendant-or-self')
        self._hasCapability('/data/au:access/au:defaultCapabilities', cid='default-status', obj='/data/status', get='descendant-or-self')
        self._hasCapability('/data/au:access/au:defaultCapabilities', cid='default-igor', obj='/data/services/igor', get='descendant-or-self')
        self._hasCapability('/data/au:access/au:defaultCapabilities', cid='default-accessControl', obj='/internal/accessControl', get='child')
        self._hasCapability('/data/au:access/au:defaultCapabilities', cid='default-sandbox', obj='/data/sandbox', get='descendant-or-self', put='descendant', post='descendant', delete='descendant')
        self._hasCapability('/data/identities', cid='people-people', obj='/data/people', get='descendant-or-self')
        self._hasCapability('/data/identities/admin', cid='admin-data', obj='/data', get='descendant-or-self', put='descendant', post='descendant', delete='descendant', delegate='true')
        self._hasCapability('/data/identities/admin', cid='admin-action', obj='/action', get='descendant', delegate='true')
        self._hasCapability('/data/identities/admin', cid='admin-internal', obj='/internal', get='descendant', delegate='true')
        self._hasCapability('/data/identities/admin', cid='admin-plugin', obj='/plugin', get='descendant', delegate='true')
        self._hasCapability('/data/identities/admin', cid='admin-filesystem', obj='/filesystem', get='self', delegate='true')
        # Every non-admin user gets capabilities over its own identity subtree
        # and its /data/people entry.
        for userElement in self._getAllElements('/data/identities/*'):
            userName = userElement.tagName
            if ':' in userName or '{' in userName or userName == 'admin':
                continue # This is not a user but a capability
            userPath = '/data/identities/'+userName
            self._hasCapability(userPath, obj=userPath, get='descendant-or-self', put='descendant', post='descendant', delete='descendant', delegate='true')
            self._hasCapability(userPath, obj='/data/people/'+userName, put='descendant', post='descendant', delete='descendant', delegate='true')
        self._hasCapability('/data/actions', cid='action-plugin', obj='/plugin', get='descendant')
        self._hasCapability('/data/actions', cid='action-action', obj='/action', get='descendant')
        self._hasCapability('/data/actions', cid='action-internal', obj='/internal', get='descendant')
        self._hasCapability('/data/actions', cid='action-environment', obj='/data/environment', get='descendant', put='descendant', post='descendant', delete='descendant')
        # Check that actions have the capabilities they need
        for actionElement in self._getAllElements('/data/actions/action'):
            actionXPath = self.database.getXPathForElement(actionElement)
            tokensNeeded = self._getTokensNeededByElement(actionElement, optional=self.extended)
            for item in tokensNeeded:
                self._hasCapability(actionXPath, **item)
        # Check that plugins have the capabilities they need.
        for pluginElement in self._getAllElements('/data/plugindata/*'):
            pluginName = pluginElement.tagName
            if ':' in pluginName or '{' in pluginName:
                continue
            pluginDataPath = f'/data/plugindata/{pluginName}'
            # Plugins specify all capabilities they need in their au:needCapability elements
            # But we ensure that it always needs access to its own plugindata
            tokensNeeded = [dict(obj=pluginDataPath, get='descendant-or-self')]
            tokensNeeded += self._getTokensNeededByElement(pluginElement, optional=self.extended)
            for item in tokensNeeded:
                self._hasCapability(pluginDataPath, **item)
        #
        # Second set of checks: test that capability tree is indeed a tree
        #
        # Duplicate cids make the tree ambiguous and cannot be auto-fixed.
        allCapIDs = self._getValues('//au:capability/cid')
        if len(allCapIDs) != len(set(allCapIDs)):
            for c in set(allCapIDs):
                allCapIDs.remove(c)
            for c in allCapIDs:
                self._status(f'Non-unique cid: {c}')
            raise CannotFix
        allCaps = self._getAllElements('//au:capability')
        cid2cap = {}
        cid2parent = {}
        # create mapping cid->capability
        for cap in allCaps:
            cid = self._getValue('cid', cap)
            if not cid:
                if self.fix:
                    self._status(f'Cannot fix yet: Capability {self.database.getXPathForElement(cap)} has no cid')
                    raise CannotFix
                else:
                    # NOTE(review): this branch prints the same 'Cannot fix yet'
                    # wording as the fix branch and then still inserts the
                    # capability under a falsy key -- confirm intended.
                    self._status(f'Cannot fix yet: Capability {self.database.getXPathForElement(cap)} has no cid')
            cid2cap[cid] = cap
        # Check parent/child relation for each capability
        for cap in allCaps:
            cid = self._getValue('cid', cap)
            if not cid:
                continue # Error given earlier already
            for childCid in self._getValues('child::child', cap):
                if not childCid in cid2cap:
                    # Dangling child reference: removable when fixing.
                    if self.fix:
                        self.database.delValues(f"child::child[text()='{childCid}']", token=self.token, context=cap)
                        self.database.setChanged()
                        self.nChanges += 1
                        self._status(f'Removed child {childCid} from {cid}', isError=False)
                    else:
                        self._status(f'Non-existing child {childCid} in {cid}')
                elif childCid in cid2parent:
                    if self.fix:
                        self._status(f'Cannot fix yet: Child with multiple parents: {childCid}')
                        raise CannotFix
                    else:
                        self._status(f'Child with multiple parents: {childCid}')
                else:
                    cid2parent[childCid] = cid
        # Check child/parent relation for each capability
        for cap in allCaps:
            cid = self._getValue('cid', cap)
            if not cid:
                continue # Error given earlier already
            if cid == 'root':
                continue
            parentCid = self._getValue('child::parent', cap)
            expectedParent = cid2parent.get(cid)
            if parentCid != expectedParent:
                if self.fix:
                    if expectedParent:
                        if parentCid:
                            self._status(f'Cannot fix yet: Inconsistent parent for {cid} ({parentCid} versus {expectedParent})')
                            raise CannotFix
                        else:
                            self._status(f'Cannot fix yet: {cid} has no parent, but is listed as child of {expectedParent}')
                            raise CannotFix
                    # No capability claims this one as a child: drop the stale
                    # parent link so the orphan handling below can re-root it.
                    self.database.delValues('child::parent', token=self.token, context=cap)
                    self.database.setChanged()
                    self.nChanges += 1
                    parentCid = None
                else:
                    if expectedParent and not parentCid:
                        self._status(f'Capability {cid} has no parent but listed by {expectedParent} as child')
                    elif parentCid and not expectedParent:
                        self._status(f'Parent for {cid} is {parentCid} but not listed there as child')
                    else:
                        self._status(f'Inconsistent parent for {cid} ({parentCid} versus {expectedParent})')
            if not parentCid:
                if self.fix:
                    self._fixParentCapability(cap, cid)
                    self._status(f'Orphaned capability {cid} given parent root', isError=False)
                else:
                    self._status(f'Capability {self.database.getXPathForElement(cap)} has no parent')
        #
        # Third set of checks: are capabilities stored in the correct places
        #
        expectedLocations = (
            self._getAllElements('/data/au:access/au:defaultCapabilities') +
            self._getAllElements('/data/au:access/au:exportedCapabilities') +
            self._getAllElements('/data/au:access/au:unusedCapabilities') +
            self._getAllElements('/data/identities') +
            self._getAllElements('/data/identities/*') +
            self._getAllElements('/data/actions') +
            self._getAllElements('/data/actions/action') +
            self._getAllElements('/data/plugindata/*')
        )
        actualLocations = self._getAllElements('//au:capability/..')
        badLocations = []
        for loc in actualLocations:
            if not loc in expectedLocations:
                if not loc in badLocations:
                    badLocations.append(loc)
        for loc in badLocations:
            parentPath = self.database.getXPathForElement(loc)
            cidList = self._getValues('au:capability/cid', context=loc)
            if not cidList:
                self._status(f'Listed as parent of capabilities but cannot find them: {parentPath}')
                continue
            for cid in cidList:
                if self.fix:
                    self._status(f'Cannot fix yet: Capability {cid}: in unexpected location {parentPath}')
                    raise CannotFix
                else:
                    self._status(f'Capability {cid}: in unexpected location {parentPath}')
        #
        # Fourth set: that we have all the expected capabilities
        #
        self._status('Capability consistency check finished', isError=False)
| mit |
dch312/numpy | numpy/lib/type_check.py | 41 | 15907 | """Automatically adapted for numpy Sep 19, 2005 by convertcode.py
"""
from __future__ import division, absolute_import, print_function
__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
'isreal', 'nan_to_num', 'real', 'real_if_close',
'typename', 'asfarray', 'mintypecode', 'asscalar',
'common_type']
import numpy.core.numeric as _nx
from numpy.core.numeric import asarray, asanyarray, array, isnan, \
obj2sctype, zeros
from .ufunclike import isneginf, isposinf
# Type characters ordered from largest element size to smallest.
_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'
def mintypecode(typechars,typeset='GDFgdf',default='d'):
    """
    Return the character for the minimum-size type to which given types can
    be safely cast.

    `typechars` is a list of dtype characters (or array_likes whose dtype
    character is used); `typeset` restricts the candidate characters and
    `default` is returned when no candidate matches.
    """
    # Normalise every entry to a single dtype character.
    chars = [(isinstance(entry, str) and entry) or asarray(entry).dtype.char
             for entry in typechars]
    candidates = [c for c in chars if c in typeset]
    if not candidates:
        return default
    # Mixing single-precision complex with double-precision real needs
    # double-precision complex to hold both safely.
    if 'F' in candidates and 'd' in candidates:
        return 'D'
    # Smallest element size == largest index in the ordered string.
    return min(candidates, key=_typecodes_by_elsize.index)
def asfarray(a, dtype=_nx.float_):
    """
    Return an array converted to a float type.

    Parameters
    ----------
    a : array_like
        The input array.
    dtype : str or dtype object, optional
        Float type code to coerce input array `a`.  Non-inexact dtypes
        (e.g. the integer dtypes) are replaced with float64.

    Returns
    -------
    out : ndarray
        The input `a` as a float ndarray.
    """
    requested = _nx.obj2sctype(dtype)
    if issubclass(requested, _nx.inexact):
        return asarray(a, dtype=requested)
    # Integer (or other non-inexact) dtypes are promoted to the default float.
    return asarray(a, dtype=_nx.float_)
def real(val):
    """
    Return the real part of the elements of the array.

    Parameters
    ----------
    val : array_like
        Input array.

    Returns
    -------
    out : ndarray
        The real component of each element.  For real input the original
        type is preserved; for complex input the result is float.
    """
    arr = asanyarray(val)
    return arr.real
def imag(val):
    """
    Return the imaginary part of the elements of the array.

    Parameters
    ----------
    val : array_like
        Input array.

    Returns
    -------
    out : ndarray
        The imaginary component of each element (zeros for real input).
    """
    arr = asanyarray(val)
    return arr.imag
def iscomplex(x):
    """
    Returns a bool array, where True if input element is complex.

    What is tested is whether the element has a non-zero imaginary part,
    not whether the input *type* is complex.

    Parameters
    ----------
    x : array_like
        Input array.

    Returns
    -------
    out : ndarray of bools
        Output array.
    """
    arr = asanyarray(x)
    if not issubclass(arr.dtype.type, _nx.complexfloating):
        # Real-typed input: no element can have an imaginary part.
        # Unary + converts a 0-d array to an array scalar when needed.
        return +zeros(arr.shape, bool)
    return arr.imag != 0
def isreal(x):
    """
    Returns a bool array, where True if input element is real.

    A complex-typed element whose imaginary part is zero counts as real.

    Parameters
    ----------
    x : array_like
        Input array.

    Returns
    -------
    out : ndarray, bool
        Boolean array of same shape as `x`.
    """
    # An element is real exactly when its imaginary component is zero.
    return asanyarray(x).imag == 0
def iscomplexobj(x):
    """
    Check for a complex type or an array of complex numbers.

    Only the type is inspected: a complex value with zero imaginary part
    still counts as complex.

    Parameters
    ----------
    x : any
        The input can be of any type and shape.

    Returns
    -------
    iscomplexobj : bool
        True when `x` is of a complex type.
    """
    kind = asarray(x).dtype.type
    return issubclass(kind, _nx.complexfloating)
def isrealobj(x):
    """
    Return True if x is not of a complex type.

    Only the type is inspected: a complex value with zero imaginary part
    still makes this return False.

    Parameters
    ----------
    x : any
        The input can be of any type and shape.

    Returns
    -------
    y : bool
        False when `x` is of a complex type.
    """
    kind = asarray(x).dtype.type
    return not issubclass(kind, _nx.complexfloating)
#-----------------------------------------------------------------------------
def _getmaxmin(t):
    """Return (max, min) finite values representable by float type *t*."""
    from numpy.core import getlimits
    info = getlimits.finfo(t)
    return info.max, info.min
def nan_to_num(x):
    """
    Replace nan with zero and inf with finite numbers.

    NaN becomes 0, +inf becomes the largest finite value of the output
    dtype and -inf the smallest.  Integer input is returned unchanged
    (as a copy); complex input is handled component-wise.

    Parameters
    ----------
    x : array_like
        Input data.

    Returns
    -------
    out : ndarray, float
        Array with the same shape as `x`, with non-finite values replaced.
    """
    try:
        kind = x.dtype.type
    except AttributeError:
        # Plain Python scalar (or other non-array): classify by its type.
        kind = obj2sctype(type(x))
    if issubclass(kind, _nx.complexfloating):
        # Clean real and imaginary components independently.
        return nan_to_num(x.real) + 1j * nan_to_num(x.imag)
    try:
        result = x.copy()
    except AttributeError:
        result = array(x)
    if issubclass(kind, _nx.integer):
        # Integers cannot hold nan/inf; nothing to replace.
        return result
    # Promote a 0-d input to 1-d so fancy-index assignment works,
    # remembering to unwrap again before returning.
    scalar = False
    if not result.shape:
        result = array([x])
        scalar = True
    maxf, minf = _getmaxmin(result.dtype.type)
    result[isnan(result)] = 0
    result[isposinf(result)] = maxf
    result[isneginf(result)] = minf
    if scalar:
        result = result[0]
    return result
#-----------------------------------------------------------------------------
def real_if_close(a,tol=100):
    """
    If complex input returns a real array if complex parts are close to zero.

    "Close to zero" is defined as `tol` * (machine epsilon of the type for
    `a`) when ``tol > 1``; otherwise `tol` is used as an absolute tolerance.

    Parameters
    ----------
    a : array_like
        Input array.
    tol : float
        Tolerance in machine epsilons for the complex part of the elements
        in the array.

    Returns
    -------
    out : ndarray
        `a` unchanged when real (or when the imaginary parts are not
        negligible); otherwise the real component only.
    """
    arr = asanyarray(a)
    if not issubclass(arr.dtype.type, _nx.complexfloating):
        return arr
    if tol > 1:
        # Interpret tol as a multiple of the dtype's machine epsilon.
        from numpy.core import getlimits
        tol = getlimits.finfo(arr.dtype.type).eps * tol
    if _nx.allclose(arr.imag, 0, atol=tol):
        return arr.real
    return arr
def asscalar(a):
    """
    Convert an array of size 1 to its scalar equivalent.

    Parameters
    ----------
    a : ndarray
        Input array of size 1.

    Returns
    -------
    out : scalar
        Scalar representation of `a`, as produced by the input's
        ``item`` method.
    """
    # Delegate entirely to ndarray.item, which extracts the single element.
    return a.item()
#-----------------------------------------------------------------------------
# Human-readable names for the single-character dtype codes.
_namefromtype = {
    'S1': 'character',
    '?': 'bool',
    'b': 'signed char',
    'B': 'unsigned char',
    'h': 'short',
    'H': 'unsigned short',
    'i': 'integer',
    'I': 'unsigned integer',
    'l': 'long integer',
    'L': 'unsigned long integer',
    'q': 'long long integer',
    'Q': 'unsigned long long integer',
    'f': 'single precision',
    'd': 'double precision',
    'g': 'long precision',
    'F': 'complex single precision',
    'D': 'complex double precision',
    'G': 'complex long double precision',
    'S': 'string',
    'U': 'unicode',
    'V': 'void',
    'O': 'object',
}
def typename(char):
    """
    Return a description for the given data type code.

    Parameters
    ----------
    char : str
        Data type code (e.g. ``'d'``, ``'?'``, ``'S1'``).

    Returns
    -------
    out : str
        Description of the input data type code.

    Raises
    ------
    KeyError
        If `char` is not a recognised type code.
    """
    return _namefromtype[char]
#-----------------------------------------------------------------------------
#determine the "minimum common type" for a group of arrays.
# Promotion tables: row 0 holds the real float types, row 1 the complex
# ones; the column index is the precision level (0=single, 1=double,
# 2=long double).
array_type = [[_nx.single, _nx.double, _nx.longdouble],
              [_nx.csingle, _nx.cdouble, _nx.clongdouble]]
array_precision = {_nx.single: 0,
                   _nx.double: 1,
                   _nx.longdouble: 2,
                   _nx.csingle: 0,
                   _nx.cdouble: 1,
                   _nx.clongdouble: 2}
def common_type(*arrays):
    """
    Return a scalar type which is common to the input arrays.

    The result is always an inexact (floating point) scalar type, even if
    all the arrays are integer arrays; integer input forces at least
    double precision.  All input arrays can be safely cast to the
    returned dtype without loss of information.

    Parameters
    ----------
    array1, array2, ... : ndarrays
        Input arrays.

    Returns
    -------
    out : data type code
        Data type code.

    Raises
    ------
    TypeError
        If an input array is not of a numeric dtype.
    """
    any_complex = False
    precision = 0
    for a in arrays:
        kind = a.dtype.type
        if iscomplexobj(a):
            any_complex = True
        if issubclass(kind, _nx.integer):
            # Integers need at least double precision to be held exactly.
            level = 1
        else:
            level = array_precision.get(kind)
            if level is None:
                raise TypeError("can't get common type for non-numeric array")
        if level > precision:
            precision = level
    return array_type[1 if any_complex else 0][precision]
| bsd-3-clause |
djgroen/flee-release | test_awareness.py | 1 | 1322 | import flee.flee as flee
import datamanager.handle_refugee_data as handle_refugee_data
import numpy as np
import outputanalysis.analysis as a
import sys
"""
Generation 1 code. Incorporates only distance, travel always takes one day.
"""
if __name__ == "__main__":
  print("Testing basic data handling and simulation kernel.")
  # Pin min, max and walk speeds to the same value so every move covers a
  # fixed distance per time step.
  flee.SimulationSettings.MinMoveSpeed=5000.0
  flee.SimulationSettings.MaxMoveSpeed=5000.0
  flee.SimulationSettings.MaxWalkSpeed=5000.0
  # Optional first command-line argument selects the agent awareness level.
  if(len(sys.argv)>1):
    flee.SimulationSettings.AwarenessLevel = int(sys.argv[1])
  end_time = 10
  e = flee.Ecosystem()
  # Small test graph: A fans out to B, C and D; the C2/D2 routes converge
  # on the camp location D3.
  l1 = e.addLocation("A", movechance=0.3)
  l2 = e.addLocation("B", movechance=0.3)
  l3 = e.addLocation("C", movechance=0.3)
  l4 = e.addLocation("D", movechance=0.3)
  l5 = e.addLocation("C2", movechance=0.3)
  l6 = e.addLocation("D2", movechance=0.3)
  # NOTE(review): movechance is the string "camp" here, unlike the float
  # movechances above -- confirm flee interprets this as a camp marker.
  l7 = e.addLocation("D3", movechance="camp")
  # NOTE(review): link distances are passed as strings -- confirm linkUp
  # converts them.
  e.linkUp("A","B","834.0")
  e.linkUp("A","C","834.0")
  e.linkUp("A","D","834.0")
  e.linkUp("C","C2","834.0")
  e.linkUp("C","D2","834.0")
  e.linkUp("D","D2","834.0")
  e.linkUp("D2","D3","834.0")
  e.linkUp("C2","D3","834.0")
  e.addAgent(location=l1)
  # Exercise ruleset-2 route selection for the single agent, with debug output.
  e.agents[0].selectRouteRuleset2(debug=True)
  #for t in range(0,end_time):
  # Propagate the model by one time step.
  # e.evolve()
  print("Test successful!")
| bsd-3-clause |
walimis/openembedded-gpib2410 | bitbake/lib/bb/msg.py | 5 | 3091 | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'msg' implementation
Message handling infrastructure for bitbake
"""
# Copyright (C) 2006 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys, os, re, bb
from bb import utils, event
# Per-domain integer debug levels, keyed by domain Enum value plus 'default'.
debug_level = {}
# When True, note() messages are printed regardless of debug level.
verbose = False
# Enumeration of the bitbake subsystems that can emit messages.
domain = bb.utils.Enum(
    'Build',
    'Cache',
    'Collection',
    'Data',
    'Depends',
    'Fetcher',
    'Parsing',
    'PersistData',
    'Provider',
    'RunQueue',
    'TaskData',
    'Util')
class MsgBase(bb.event.Event):
    """Base class for messages.

    Wraps a message string in a bitbake event so UI handlers can display it.
    """
    def __init__(self, msg, d ):
        # msg: the message text; d: event data passed through to bb.event.Event.
        self._message = msg
        event.Event.__init__(self, d)
class MsgDebug(MsgBase):
    """Debug Message"""
class MsgNote(MsgBase):
    """Note Message"""
class MsgWarn(MsgBase):
    """Warning Message"""
class MsgError(MsgBase):
    """Error Message"""
class MsgFatal(MsgBase):
    """Fatal Message"""
class MsgPlain(MsgBase):
    """General output"""
#
# Message control functions
#
def set_debug_level(level):
    """Reset every known domain (and the default) to the given debug level."""
    bb.msg.debug_level = {}
    for domain in bb.msg.domain:
        bb.msg.debug_level[domain] = level
    bb.msg.debug_level['default'] = level
def set_verbose(level):
    """Enable or disable verbose note() output (level is treated as a bool)."""
    bb.msg.verbose = level
def set_debug_domains(domains):
    """Increment the debug level of each named domain; warn on unknown names."""
    for domain in domains:
        found = False
        for ddomain in bb.msg.domain:
            if domain == str(ddomain):
                bb.msg.debug_level[ddomain] = bb.msg.debug_level[ddomain] + 1
                found = True
        if not found:
            bb.msg.warn(None, "Logging domain %s is not valid, ignoring" % domain)
#
# Message handling functions
#
def debug(level, domain, msg, fn = None):
    """Fire a MsgDebug event; print when the domain's debug level >= level."""
    bb.event.fire(MsgDebug(msg, None))
    if not domain:
        domain = 'default'
    if debug_level[domain] >= level:
        print 'DEBUG: ' + msg
def note(level, domain, msg, fn = None):
    """Fire a MsgNote event; print for level-1 notes, in verbose mode, or
    when the domain's debug level is nonzero."""
    bb.event.fire(MsgNote(msg, None))
    if not domain:
        domain = 'default'
    if level == 1 or verbose or debug_level[domain] >= 1:
        print 'NOTE: ' + msg
def warn(domain, msg, fn = None):
    """Fire a MsgWarn event and always print the warning."""
    bb.event.fire(MsgWarn(msg, None))
    print 'WARNING: ' + msg
def error(domain, msg, fn = None):
    """Fire a MsgError event and always print the error (does not exit)."""
    bb.event.fire(MsgError(msg, None))
    print 'ERROR: ' + msg
def fatal(domain, msg, fn = None):
    """Fire a MsgFatal event, print the error and exit with status 1."""
    bb.event.fire(MsgFatal(msg, None))
    print 'ERROR: ' + msg
    sys.exit(1)
def plain(msg, fn = None):
    """Fire a MsgPlain event and print the message with no prefix."""
    bb.event.fire(MsgPlain(msg, None))
    print msg
| mit |
gbiggs/flexicomps | flexifilter/flexifilter.py | 1 | 16791 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''flexicomps
Copyright (C) 2008-2010
Geoffrey Biggs
RT-Synthesis Research Group
Intelligent Systems Research Institute,
National Institute of Advanced Industrial Science and Technology (AIST),
Japan
All rights reserved.
Licensed under the Eclipse Public License -v 1.0 (EPL)
http://www.opensource.org/licenses/eclipse-1.0.txt
File: flexifilter.py
FlexiFilter component.
'''
__version__ = '$Revision: $'
# $Source$
import inspect, re, sys
from traceback import print_exception
from optparse import OptionParser, OptionError
import OpenRTM_aist, RTC
from typemap import typeMap, multMap
#Globals set by command line options
inputPorts = []     # List of Port objects describing the component's inputs
outputPorts = []    # List of Port objects describing the component's outputs
newestTime = False  # True: stamp outputs with the newest source time; False: oldest
verbosity = 0       # >= 2 prints per-port data traffic
zeroOld = False     # True: clear output data each cycle before merging new data
def CompTimes (t1, t2):
    """cmp-style comparator for RTC.Time-like values.

    Returns -1, 0 or 1 according to (sec, nsec) ordering of t1 versus t2.
    """
    key1 = (t1.sec, t1.nsec)
    key2 = (t2.sec, t2.nsec)
    if key1 < key2:
        return -1
    if key1 > key2:
        return 1
    return 0
# Class to store a pin mapping
class PinMapping:
    """Describes where one output pin gets its data.

    Holds an (input port, pin) pair plus an optional multiplier and an
    optional conversion function applied when the data is copied across.
    """
    def __init__ (self, port, pin, multiplier = None, postMult = False, convFunc = None):
        self.port = port              # Index into the inputPorts list for this pin's source
        self.pin = pin                # Pin number on the input port for this pin's source
        self.multiplier = multiplier  # None for no multiplier
        self.postMult = postMult      # False to multiply before conversion, True after
        self.convFunc = convFunc      # None for no conversion function
    def ConvertData (self, data):
        """Return *data* scaled and/or converted according to this mapping."""
        if self.convFunc is None:
            # No conversion step; at most a straight multiplication.
            if self.multiplier is None:
                return data
            return data * self.multiplier
        value = data
        if self.multiplier is not None and not self.postMult:
            value *= self.multiplier
        value = self.convFunc (value)
        if self.multiplier is not None and self.postMult:
            value *= self.multiplier
        return value
# Structure to store a port
class Port:
    """Describes one input or output port of the filter component."""
    def __init__ (self, desc, index, type, length, emptyVal):
        self.desc = desc # The string from the command line for this port
        self.index = index # Port number
        self.type = type # Port type object
        self.length = length # Port length (must be 0 for non-sequence ports)
        self.emptyVal = emptyVal # Value when no data
        self.data = None # Data storage for the port (set later)
        self.portObj = None # The actual port object (set later)
        # Source(s) for this port (initialised now, possibly set later)
        if self.length == 0:
            # Scalar ports have None or a PinMapping object
            self.sourceMap = None
        else:
            # This list contains one PinMapping for each pin on the port, or None for no input to that pin.
            self.sourceMap = [None for ii in range (self.length)]
        # Data received times for this port (set later)
        if self.length == 0:
            self.times = None # No need to store a history of received times for a scalar
        else:
            self.times = [RTC.Time (0, 0) for ii in range (self.length)]
    def GetDataTime (self):
        """Return the time stamp to attach to this port's data: the stored
        stamp for a scalar, or the newest/oldest per-pin receive time
        (depending on the global newestTime flag) for a sequence."""
        if self.length == 0:
            return self.data.tm
        # NOTE(review): this sorts self.times in place (times is an alias,
        # not a copy), reordering the per-pin history -- confirm intended.
        times = self.times
        times.sort (cmp = CompTimes)
        if newestTime:
            return times[-1]
        else:
            return times[0]
# OpenRTM component profile: alternating key/value strings, terminated by
# an empty string as the RTC spec format requires.
flexifilter_spec = ['implementation_id', 'FlexiFilter',
                    'type_name', 'FlexiFilter',
                    'description', 'Flexible filter',
                    'version', '0.0.1',
                    'vendor', 'Geoffrey Biggs, AIST',
                    'category', 'DataConsumer',
                    'activity_type', 'DataFlowComponent',
                    'max_instance', '999',
                    'language', 'Python',
                    'lang_type', 'SCRIPT',
                    '']
class FlexiFilter (OpenRTM_aist.DataFlowComponentBase):
    """RT component that routes/converts data from its input ports to its
    output ports according to the pin mappings in the global port lists."""
    def __init__ (self, manager):
        OpenRTM_aist.DataFlowComponentBase.__init__ (self, manager)
        # Take the port descriptions built from the command line.
        self.__inputPorts = inputPorts
        self.__outputPorts = outputPorts
    def onStartup (self, ec_id):
        """Create and register the RTC in/out ports described by the lists."""
        try:
            # Each port list is a list of tuples containing (data object, port object)
            self.__numInputPorts = len (self.__inputPorts)
            self.__numOutputPorts = len (self.__outputPorts)
            for newPort in self.__inputPorts:
                newPortData = newPort.type (RTC.Time (0, 0), [])
                newPortPort = OpenRTM_aist.InPort ('input%d' % newPort.index, newPortData, OpenRTM_aist.RingBuffer (8))
                self.registerInPort ('input%d' % newPort.index, newPortPort)
                newPort.data = newPortData
                newPort.portObj = newPortPort
            for newPort in self.__outputPorts:
                # Output data starts out filled with the port's empty value.
                newPortData = newPort.type (RTC.Time (0, 0), [newPort.emptyVal for ii in range (newPort.length)])
                newPortPort = OpenRTM_aist.OutPort ('output%d' % newPort.index, newPortData, OpenRTM_aist.RingBuffer (8))
                self.registerOutPort ('output%d' % newPort.index, newPortPort)
                newPort.data = newPortData
                newPort.portObj = newPortPort
        except:
            print_exception (*sys.exc_info ())
            return RTC.RTC_ERROR
        return RTC.RTC_OK
    def onActivated (self, ec_id):
        """Reset all output data buffers when the component is activated."""
        for port in self.__outputPorts:
            # Clear out old data
            if port.length != 0:
                port.data.data = [port.emptyVal for kk in range (port.length)]
            else:
                port.data.data = port.emptyVal
        return RTC.RTC_OK
    def onExecute (self, ec_id):
        """One filter cycle: read all fresh input, merge it into the output
        buffers via the pin mappings, and write ports that changed."""
        try:
            haveNewData = False
            inputData = []
            outputPortIsNew = [False for ii in range (self.__numOutputPorts)]
            # Collect new data (or None) from every input port, in order.
            for port in self.__inputPorts:
                if port.portObj.isNew ():
                    inputData.append (port.portObj.read ())
                    haveNewData = True
                    if verbosity >= 2:
                        print 'Input port %d has new data: ' % port.index + str (inputData[-1].data)
                else:
                    inputData.append (None) # No data for this port
            if not haveNewData:
                return RTC.RTC_OK
            # For each pin on each output port, check if its input has new data and convert as appropriate
            for port in self.__outputPorts:
                if zeroOld:
                    # Clear out old data (this will only be written if there is new data for this port available)
                    if port.length != 0:
                        port.data.data = [port.emptyVal for kk in range (port.length)]
                    else:
                        port.data.data = port.emptyVal
                if port.length == 0: # Scalar port
                    if port.sourceMap is None:
                        continue # No input for this pin
                    if inputData[port.sourceMap.port] is None:
                        continue # No new data for this pin
                    if self.__inputPorts[port.sourceMap.port].length == 0:
                        newData = inputData[port.sourceMap.port].data
                    else:
                        newData = inputData[port.sourceMap.port].data[port.sourceMap.pin]
                    # Copy the (possibly converted) data to the output port and mark the port as new
                    port.data.data = port.sourceMap.ConvertData (newData)
                    port.data.tm = inputData[port.sourceMap.port].tm
                    outputPortIsNew[port.index] = True
                else:
                    for ii in range (port.length):
                        if port.sourceMap[ii] is None:
                            continue # No input for this pin
                        sourceMap = port.sourceMap[ii]
                        if inputData[sourceMap.port] is None:
                            continue # No new data for this pin
                        if self.__inputPorts[sourceMap.port].length == 0:
                            newData = inputData[sourceMap.port].data
                        else:
                            newData = inputData[sourceMap.port].data[sourceMap.pin]
                        # Copy the (possibly converted) data to the output port and mark the port as new
                        port.data.data[ii] = sourceMap.ConvertData (newData)
                        port.times[ii] = inputData[sourceMap.port].tm
                        port.data.tm = port.GetDataTime ()
                        outputPortIsNew[port.index] = True
            # Write each output port that has new data
            for ii in range (self.__numOutputPorts):
                if outputPortIsNew[ii]:
                    if verbosity >= 2:
                        print 'Output port %d has new data: ' % ii + str (self.__outputPorts[ii].data.data)
                    self.__outputPorts[ii].portObj.write ()
        except:
            # NOTE(review): errors during execution are printed but the cycle
            # still reports RTC_OK -- confirm best-effort behaviour is wanted.
            print_exception (*sys.exc_info ())
        return RTC.RTC_OK
def FlexiFilterInit (manager):
    """Module init callback for the OpenRTM manager: register the FlexiFilter
    component factory and create one component instance."""
    profile = OpenRTM_aist.Properties (defaults_str = flexifilter_spec)
    manager.registerFactory (profile, FlexiFilter, OpenRTM_aist.Delete)
    # Return value is unused; the manager keeps ownership of the component.
    comp = manager.createComponent ("FlexiFilter")
def FindPortType (typeName):
types = [member for member in inspect.getmembers (RTC, inspect.isclass) if member[0] == typeName]
if len (types) == 0:
print 'Type "' + typeName + '" not found in module RTC'
return None
elif len (types) != 1:
print 'Type name "' + typeName + '" is ambiguous: ' + str ([member[0] for member in types])
return None
return types[0][1]
def GetConversionFunction (inputType, outputType):
    """Return a (convFunc, postMultiply) pair for converting inputType data
    into outputType, or None (after printing a message) when no conversion
    is available.

    convFunc is taken from the module-level typeMap table; postMultiply is
    the multiplier-ordering flag from multMap, where -1 marks a type pair
    that cannot be multiplied together.
    """
    if outputType not in typeMap:
        print 'Unknown output type: ' + outputType
        return None
    convFunc = typeMap[outputType][0]
    # NOTE(review): assumes inputType is a valid multMap key; an unknown
    # input type raises KeyError here rather than returning None -- confirm
    # callers always pass validated types.
    postMultiply = multMap[inputType][outputType]
    if postMultiply == -1:
        print 'Cannot multiply these data types: ' + inputType + ', ' + outputType
        return None
    return convFunc, postMultiply
def PrintMap (ports):
    """Pretty-print the source mapping of each port in ports.

    For every port this shows, per pin, the source input port/pin, the
    conversion function (or "No conversion") and the multiplier if any,
    including whether the multiplier is applied before or after conversion.
    Unconnected pins print as '-'.
    """
    for port in ports:
        print 'Port ' + str (port.index)
        if port.length == 0:
            # Scalar port: sourceMap is a single PinMapping or None.
            print 'Scalar:',
            if port.sourceMap == None:
                print '-'
            else:
                if port.sourceMap.multiplier != None:
                    multStr = '\tMultiplier: ' + str (port.sourceMap.multiplier)
                    if port.sourceMap.postMult:
                        multStr += ' (Post-multiplied)'
                    else:
                        multStr += ' (Pre-multiplied)'
                else:
                    multStr = ''
                if port.sourceMap.convFunc == None:
                    convStr = 'No conversion'
                else:
                    convStr = str (port.sourceMap.convFunc)
                print 'Input: %d:%d\tType: ' % (port.sourceMap.port, port.sourceMap.pin) + convStr + multStr
        else:
            # Sequence port: sourceMap holds one PinMapping (or None) per pin.
            for jj in range (len (port.sourceMap)):
                print 'Pin %d:' % jj,
                pinMap = port.sourceMap[jj]
                if pinMap == None:
                    print '-'
                else:
                    if pinMap.multiplier != None:
                        multStr = '\tMultiplier: ' + str (pinMap.multiplier)
                        if pinMap.postMult:
                            multStr += ' (Post-multiplied)'
                        else:
                            multStr += ' (Pre-multiplied)'
                    else:
                        multStr = ''
                    if pinMap.convFunc == None:
                        convStr = 'No conversion'
                    else:
                        convStr = str (pinMap.convFunc)
                    print 'Input: %d:%d\tType: ' % (pinMap.port, pinMap.pin) + convStr + multStr
def DecodePortStr (portStr):
    """Parse a port specification of the form "type[:length]" into a Port.

    Returns a Port object (index 0; the caller assigns the real index), or
    None after printing a message when the type is unknown, the length is
    not an integer, or the length is inconsistent with the type (sequence
    "...Seq" types require a non-zero length, scalar types forbid one).
    """
    portInfo = portStr.rsplit (':')
    if len (portInfo) == 1:
        # No length given; 0 means "scalar port".
        portTypeStr = portInfo[0]
        portLengthStr = 0
    else:
        portTypeStr = portInfo[0]
        portLengthStr = portInfo[1]
    portType = FindPortType (portTypeStr)
    if portType == None:
        return None
    if portTypeStr not in typeMap:
        print 'Unknown port type: ' + portTypeStr
        return None
    try:
        portLength = int (portLengthStr)
    except ValueError:
        print 'Invalid port length: ' + portLengthStr
        return None
    # Sanity check on the port length
    if portLength == 0:
        if portTypeStr.endswith ('Seq'):
            print 'Sequence port type has length of zero: ' + portStr
            return None
    else:
        if not portTypeStr.endswith ('Seq'):
            print 'Non-sequence port type has non-zero length: ' + portStr
            return None
    return Port (portStr, 0, portType, portLength, typeMap[portTypeStr][1])
def GetPortOptions ():
global inputPorts, outputPorts, newestTime, verbosity, zeroOld
try:
usage = 'usage: %prog [options]\nMap input ports to output ports, with multipliers and type conversion.'
parser = OptionParser (usage = usage)
parser.add_option ('-i', '--inputport', dest = 'inputport', type = 'string', action = 'append', default = [],
help = 'Input port specification in the format "type[:length]". Length is necessary for sequence ports.')
parser.add_option ('-m', '--map', dest = 'mapping', type = 'string', default = '0>0',
help = 'Mapping from input elements to output elements. [Default: %default]\n'\
'Port/pin mappings are specified as a comma-separated list of number pairs with optional '\
'multipliers and pin numbers. Unspecified pins default to 0. '\
'For example, "2>0:2,0:1>0.1>0:3,0:1>1" will map input port 2 pin '\
'0 to output port 0 pin 2, input port 0 pin 1 to output port 0 pin 3 with a multiplier of '\
'0.1, and input port 0 pin 1 to output port 1 pin 0.')
parser.add_option ('-n', '--newesttime', dest = 'newestTime', action = 'store_true', default = False,
help = 'Use the time of the most recent data for an output port\'s time, rather than the time of '\
'the oldest data used on the port. Only applies to sequence ports. [Default: %default]')
parser.add_option ('-o', '--outputport', dest = 'outputport', type = 'string', action = 'append', default = [],
help = 'Output port specification in the format "type[:length]". Length is necessary for sequence ports.')
parser.add_option ('-v', '--verbosity', dest = 'verbosity', type = 'int', default = 0,
help = 'Verbosity level (higher numbers give more output). [Default: %default]')
parser.add_option ('-z', '--zeroold', dest = 'zeroOld', action = 'store_true', default = False,
help = 'If new data is not available on one of the source ports for an output, pins connected to that port '\
'are set to zero (or equivalent) when other pins are updated. Otherwise, they remain at their previous '\
'value. Only applies to sequence ports. [Default: %default]')
options, args = parser.parse_args ()
except OptionError, e:
print 'OptionError: ' + str (e)
return False
newestTime = options.newestTime
zeroOld = options.zeroOld
if len (options.inputport) == 0:
parser.error ('Must specify at least one input port.')
if len (options.outputport) == 0:
parser.error ('Must specify at least one input port.')
portCount = 0
for portStr in options.inputport:
portInfo = DecodePortStr (portStr)
if portInfo == None:
parser.error ('Invalid port: ' + portStr)
portInfo.index = portCount
inputPorts.append (portInfo)
if verbosity >= 2:
print 'Added input port: ' + str (portInfo)
portCount += 1
portCount = 0
for portStr in options.outputport:
portInfo = DecodePortStr (portStr)
if portInfo == None:
parser.error ('Invalid port: ' + portStr)
portInfo.index = portCount
outputPorts.append (portInfo)
if verbosity >= 2:
print 'Added output port: ' + str (portInfo)
portCount += 1
# Parse port/pin mapping string
mapPattern = re.compile ('(?P<inPort>\d+)(:(?P<inPin>\d+))?>((?P<mult>[-\d.]+)>)?(?P<outPort>\d+)(:(?P<outPin>\d+))?')
for m in mapPattern.finditer (options.mapping):
mappingString = m.string[m.start ():m.end ()]
groups = m.groupdict ()
try:
if groups['inPort'] == None:
inPort = 0
else:
inPort = int (groups['inPort'])
if groups['inPin'] == None:
inPin = 0
else:
inPin = int (groups['inPin'])
if groups['mult'] == None:
mult = None
else:
mult = float (groups['mult'])
if groups['outPort'] == None:
outPort = 0
else:
outPort = int (groups['outPort'])
if groups['outPin'] == None:
outPin = 0
else:
outPin = int (groups['outPin'])
except ValueError, e:
parser.error ('Bad value in mapping "' + mappingString + '": ' + str (e))
#if options.verbosity >= 2:
#print 'Processing mapping "' + mappingString + \
#'" input = %d:%d\tmultiplier = %s\toutput = %d:%d' % (inPort, inPin, str (mult), outPort, outPin)
if inPort < 0 or inPort >= len (inputPorts):
parser.error ('Bad input port number in mapping "' + mappingString + '"')
if (inPin < 0 or inPin >= inputPorts[inPort].length) and inputPorts[inPort].length != 0:
parser.error ('Bad input pin number in mapping "' + mappingString + '"')
if outPort < 0 or outPort >= len (outputPorts):
parser.error ('Bad output port number in mapping "' + mappingString + '"')
if (outPin < 0 or outPin >= outputPorts[outPort].length) and outputPorts[outPort].length != 0:
parser.error ('Bad output pin number in mapping "' + mappingString + '"')
# Get conversion function for this output pin
if inputPorts[inPort].type == outputPorts[outPort].type:
# Same type, no conversion necessary
convFunc = None
postMultiply = False
else:
convFunc, postMultiply = GetConversionFunction (inputPorts[inPort].type.__name__, outputPorts[outPort].type.__name__)
if convFunc == None:
return False
# Set the entry in the map
if outputPorts[outPort].length == 0:
outputPorts[outPort].sourceMap = PinMapping (inPort, inPin, mult, postMultiply, convFunc)
else:
outputPorts[outPort].sourceMap[outPin] = PinMapping (inPort, inPin, mult, postMultiply, convFunc)
if options.verbosity:
verbosity = options.verbosity
print 'Output map:'
PrintMap (outputPorts)
# Strip the options we use from sys.argv to avoid confusing the manager's option parser
sys.argv = [option for option in sys.argv if option not in parser._short_opt.keys () + parser._long_opt.keys ()]
return True
def main ():
    """Entry point: validate the port/mapping options, then hand the
    remaining arguments to the OpenRTM manager and run it (blocks until
    the manager shuts down)."""
    # Check options for ports
    if not GetPortOptions ():
        return 1
    mgr = OpenRTM_aist.Manager.init (len (sys.argv), sys.argv)
    mgr.setModuleInitProc (FlexiFilterInit)
    mgr.activateManager ()
    mgr.runManager ()


if __name__ == "__main__":
    main ()
| epl-1.0 |
johnkeepmoving/oss-ftp | python27/win32/Lib/site-packages/cryptography/hazmat/primitives/hmac.py | 61 | 2353 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import HMACBackend
from cryptography.hazmat.primitives import hashes, interfaces
@utils.register_interface(interfaces.MACContext)
@utils.register_interface(hashes.HashContext)
class HMAC(object):
    """Keyed-hash message authentication over a pluggable backend.

    This is a thin stateful wrapper: all cryptographic work is delegated to
    the backend-provided HMAC context. After finalize() or verify() has been
    called, the object is retired and any further operation raises
    AlreadyFinalized.
    """

    def __init__(self, key, algorithm, backend, ctx=None):
        if not isinstance(backend, HMACBackend):
            raise UnsupportedAlgorithm(
                "Backend object does not implement HMACBackend.",
                _Reasons.BACKEND_MISSING_INTERFACE
            )
        if not isinstance(algorithm, hashes.HashAlgorithm):
            raise TypeError("Expected instance of hashes.HashAlgorithm.")
        self._algorithm = algorithm
        self._backend = backend
        self._key = key
        # ctx is only supplied internally (see copy()); normal construction
        # asks the backend for a fresh context.
        if ctx is not None:
            self._ctx = ctx
        else:
            self._ctx = self._backend.create_hmac_ctx(key, self.algorithm)

    algorithm = utils.read_only_property("_algorithm")

    def _check_not_finalized(self):
        # Shared guard: a retired context may not be used again.
        if self._ctx is None:
            raise AlreadyFinalized("Context was already finalized.")

    def update(self, data):
        """Feed more bytes into the MAC computation."""
        self._check_not_finalized()
        if not isinstance(data, bytes):
            raise TypeError("data must be bytes.")
        self._ctx.update(data)

    def copy(self):
        """Return an independent HMAC sharing the state accumulated so far."""
        self._check_not_finalized()
        return HMAC(
            self._key,
            self.algorithm,
            backend=self._backend,
            ctx=self._ctx.copy()
        )

    def finalize(self):
        """Return the MAC digest bytes and retire this object."""
        self._check_not_finalized()
        digest, self._ctx = self._ctx.finalize(), None
        return digest

    def verify(self, signature):
        """Check signature against the computed MAC (comparison is delegated
        to the backend context) and retire this object."""
        if not isinstance(signature, bytes):
            raise TypeError("signature must be bytes.")
        self._check_not_finalized()
        ctx, self._ctx = self._ctx, None
        ctx.verify(signature)
| mit |
hehongliang/tensorflow | tensorflow/python/estimator/export/export_lib.py | 41 | 1298 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""export_lib python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator.export import export_lib
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
export_lib.__all__ = [s for s in dir(export_lib) if not s.startswith('__')]
from tensorflow_estimator.python.estimator.export.export_lib import *
| apache-2.0 |
kaustubhsant/MugenDB | monitornode/Monitor_server.py | 1 | 5923 | import sys
import socket
import sqlite3
import select
import sys
import threading
import thread
from threading import Thread, Lock
from multiprocessing.pool import ThreadPool
import json
number_of_masters = 2
masters = dict()
clients = dict()
threshold = {'Master1':'No','Master2':'No'}
i = 0
receivestatus_port = 10008
def thresholdListen():
    """Daemon loop: listen on UDP port 10007 for "<MasterName> <value>"
    datagrams from the masters and record them in the global threshold map.
    getMaster() consults this map (value 'Yes' means the master is over
    threshold and is skipped) when redirecting client requests."""
    print 'Threshold daemon running'
    portNumber = 10007
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    host = socket.gethostname()
    sock.bind((host, portNumber))
    while True:
        request, addr = sock.recvfrom(1024)
        # Datagram payload is space-separated: master name, then the value.
        master,val = request.split(" ")
        threshold[master]= val
def informslaves(data):
    """Broadcast a new-master announcement to every slave listed in
    config/slave.txt (lines of the form "NAME=host:port[,...]").

    data is the master description string (e.g. "Master2=1.2.3.4:5000"),
    sent JSON-encoded over UDP to each slave's first listed endpoint.
    """
    # BUG FIX: the original built req = {"request": "New", "data": data} but
    # then sent only `data`; the dict was dead code and has been removed.
    # NOTE(review): if the slaves actually expect the wrapped dict form,
    # send json.dumps(req) instead -- confirm against the slave code.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        with open("config/slave.txt", 'r') as fin:
            for line in fin:
                # "NAME=host:port,..." -> take the first endpoint after '='.
                host, port = line.strip().split("=")[1].split(",")[0].split(":")
                sock.sendto(json.dumps(data), (host, int(port)))
    finally:
        # One socket reused for all datagrams; always released.
        sock.close()
def receiveStatus():
'''get the status for processed request from master and send it back to client'''
global masters
print 'Listening for status on port ' + str(receivestatus_port)
portNumber = receivestatus_port
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
host = socket.gethostname()
sock.bind((host, portNumber))
while True:
status, addr = sock.recvfrom(1024)
returnobj=json.loads(status)
if returnobj['result'] == "shutdown":
masters = {k:v for k,v in masters.items if v.split(":")[0] != addr[0]}
elif returnobj['result'] == "New":
data = "Master{}={}:{}".format(len(masters)+1,addr[0],returnobj['port'])
with open("config/masters.txt",'a') as myfile:
myfile.write("{}\n".format(data))
with open("config/slave.txt",'r') as fin:
slaves = ""
for line in fin:
slaves = slaves + line
sock.sendto(slaves,(addr[0],int(returnobj['port'])))
informslaves(data)
masters["Master{}".format(len(masters)+1)] = "{}:{}".format(addr[0],returnobj['port'])
else:
clients[returnobj['userid']].send(str(returnobj['result']))
class Server:
    ''' Accept requests from clients and redirect them to masters in round robin pattern'''
    def __init__(self):
        # Listen on all interfaces; clients connect over TCP on this port.
        self.host = ''
        self.port = 13464
        self.backlog = 5
        self.size = 1024
        self.server = None
        # Up to 10 client connections handled concurrently.
        self.pool = ThreadPool(10)
        # Seed the module-level masters map from "NameN=host:port" lines.
        with open("config/masters.txt") as myfile:
            for line in myfile:
                name, endpoint = line.partition("=")[::2]
                masters[name] = endpoint
        #name = "Master1"
        #endpoint = "localhost:10003"
    def open_socket(self):
        # Create and bind the listening TCP socket.
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.bind((self.host,self.port))
        self.server.listen(5)
    def run(self):
        """Accept client connections forever. Each accepted client is handed
        to the module-level run() function on a pool thread. Also starts the
        threshold-listener and status-receiver background threads. Typing a
        line on stdin stops the server."""
        self.open_socket()
        input = [self.server,]
        running = 1
        #start a daemon thread to listen threshold
        thread = Thread(target = thresholdListen, args = ())
        thread.start()
        #start a thread to listen results of req from masters
        resultThread = Thread(target = receiveStatus, args = ())
        resultThread.start()
        while running:
            inputready,outputready,exceptready = select.select(input,[],[])
            for s in inputready:
                if s == self.server:
                    client,address = self.server.accept()
                    #Assign one thread to handle each client.
                    self.pool.apply_async(run, args=(client,address))
                else:
                    # Any other readable source is treated as a stop signal.
                    junk = sys.stdin.readline()
                    running = 0
        self.server.close()
def getMaster():
    """Advance the round-robin cursor and return the name of the next
    master whose threshold flag is not 'Yes'.

    Mutates the module-level cursor `i`; spins until an available master
    is found (so it never returns an over-threshold master).
    """
    global i
    i = (i % number_of_masters) + 1
    # Skip masters that have reported themselves over threshold.
    while threshold['Master' + str(i)] == 'Yes':
        i = (i % number_of_masters) + 1
    return 'Master' + str(i)
def run(client,address):
'''This method will be run in seperate thread to process client requests.'''
size = 1024
running = 1
attempts = 0
flag = 0
while running:
while(attempts < 3 and flag == 0):
attempts = attempts + 1
data = json.loads(client.recv(size))
if data:
clients[data['username']]=client
conn_2 = sqlite3.connect('authentication_info.db')
c_2 = conn_2.cursor()
password = ""
for row in c_2.execute("SELECT password from user_info where username = '%s'" % data['username']):
password = row
conn_2.close()
if not password:
client.send('Login failed')
elif data['password'] != password[0]:
client.send('Login failed')
else:
print 'Login Successful\n'
client.send('Thank you for connecting')
flag = 1
break
request_key_value_pair = json.loads(client.recv(size))
if (request_key_value_pair['request']=="Logout"):
print "closing connection"
client.close()
running = 0
flag = 0
print 'Request is ' + str(request_key_value_pair)
master_node = getMaster()
print master_node+'is serving the request'
host,port = masters[master_node].partition(":")[::2]
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print host,port
sock.sendto(json.dumps(request_key_value_pair), (host,int(port)))
sock.close()
#client.send("successfully received input data and request")
if __name__ == "__main__":
    # Bootstrap the demo credential database, then start the monitor server.
    # NOTE(review): passwords are stored in clear text and CREATE TABLE will
    # raise if the table already exists (the DROP is commented out) -- this
    # setup is only suitable for a fresh demo run.
    conn = sqlite3.connect('authentication_info.db')
    c = conn.cursor()
    #c.execute('''DROP TABLE user_info''')
    c.execute("CREATE TABLE user_info (username text, password text)")
    c.execute("INSERT INTO user_info values('shashank','goud')")
    c.execute("INSERT INTO user_info values('ankit','bhandari')")
    c.execute("INSERT INTO user_info values('kaustubh','sant')")
    c.execute("INSERT INTO user_info values('nikhil','chintapallee')")
    conn.commit()
    conn.close()
    s = Server()
    s.run()
| mit |
GeyerA/android_external_chromium_org | chrome/test/functional/media/media_seek_perf.py | 56 | 4190 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Seek performance testing for <video>.
Calculates the short and long seek times for different video formats on
different network constraints.
"""
import logging
import os
import posixpath
import pyauto_media
import pyauto_utils
import cns_test_base
import worker_thread
# Number of threads to use during testing.
_TEST_THREADS = 3
# HTML test path; relative to src/chrome/test/data.
_TEST_HTML_PATH = os.path.join('media', 'html', 'media_seek.html')
# The media files used for testing.
# Path under CNS root folder (pyauto_private/media).
_TEST_VIDEOS = [posixpath.join('dartmoor', name) for name in
['dartmoor2.mp3', 'dartmoor2.wav']]
_TEST_VIDEOS.extend([posixpath.join('crowd', name) for name in
['crowd1080.webm', 'crowd1080.ogv', 'crowd1080.mp4',
'crowd360.webm', 'crowd360.ogv', 'crowd360.mp4']])
# Constraints to run tests on.
_TESTS_TO_RUN = [
cns_test_base.Wifi,
cns_test_base.NoConstraints]
class SeekWorkerThread(worker_thread.WorkerThread):
"""Worker thread. Runs a test for each task in the queue."""
def RunTask(self, unique_url, task):
"""Runs the specific task on the url given.
It is assumed that a tab with the unique_url is already loaded.
Args:
unique_url: A unique identifier of the test page.
task: A (series_name, settings, file_name) tuple to run the test on.
"""
series_name, settings, file_name = task
video_url = cns_test_base.GetFileURL(
file_name, bandwidth=settings[0], latency=settings[1],
loss=settings[2])
# Start the test!
self.CallJavascriptFunc('startTest', [video_url], unique_url)
logging.debug('Running perf test for %s.', video_url)
# Time out is dependent on (seeking time * iterations). For 3 iterations
# per seek we get total of 18 seeks per test. We expect buffered and
# cached seeks to be fast. Through experimentation an average of 10 secs
# per seek was found to be adequate.
if not self.WaitUntil(self.GetDOMValue, args=['endTest', unique_url],
retry_sleep=5, timeout=300, debug=False):
error_msg = 'Seek tests timed out.'
else:
error_msg = self.GetDOMValue('errorMsg', unique_url)
cached_states = self.GetDOMValue(
"Object.keys(CachedState).join(',')", unique_url).split(',')
seek_test_cases = self.GetDOMValue(
"Object.keys(SeekTestCase).join(',')", unique_url).split(',')
graph_name = series_name + '_' + os.path.basename(file_name)
for state in cached_states:
for seek_case in seek_test_cases:
values = self.GetDOMValue(
"seekRecords[CachedState.%s][SeekTestCase.%s].join(',')" %
(state, seek_case), unique_url)
if values:
results = [float(value) for value in values.split(',')]
else:
results = []
pyauto_utils.PrintPerfResult('seek_%s_%s' % (state.lower(),
seek_case.lower()), graph_name,
results, 'ms')
if error_msg:
logging.error('Error while running %s: %s.', graph_name, error_msg)
return False
else:
return True
class MediaSeekPerfTest(cns_test_base.CNSTestBase):
  """PyAuto test container. See file doc string for more information."""

  def __init__(self, *args, **kwargs):
    """Initialize the CNSTestBase with socket_timeout = 60 secs."""
    cns_test_base.CNSTestBase.__init__(self, socket_timeout='60',
                                       *args, **kwargs)

  def testMediaSeekPerformance(self):
    """Launches HTML test which plays each video and records seek stats."""
    # Run every (constraint, video) combination across _TEST_THREADS workers.
    tasks = cns_test_base.CreateCNSPerfTasks(_TESTS_TO_RUN, _TEST_VIDEOS)
    if worker_thread.RunWorkerThreads(self, SeekWorkerThread, tasks,
                                      _TEST_THREADS, _TEST_HTML_PATH):
      self.fail('Some tests failed to run as expected.')


if __name__ == '__main__':
  pyauto_media.Main()
| bsd-3-clause |
campbe13/openhatch | vendor/packages/twisted/twisted/test/test_pbfailure.py | 18 | 13609 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for error handling in PB.
"""
from twisted.trial import unittest
from twisted.spread import pb, flavors, jelly
from twisted.internet import reactor, defer
from twisted.python import log
##
# test exceptions
##
class AsynchronousException(Exception):
    """
    Helper used to test remote methods which return Deferreds which fail with
    exceptions which are not L{pb.Error} subclasses.
    """



class SynchronousException(Exception):
    """
    Helper used to test remote methods which raise exceptions which are not
    L{pb.Error} subclasses.
    """



class AsynchronousError(pb.Error):
    """
    Helper used to test remote methods which return Deferreds which fail with
    exceptions which are L{pb.Error} subclasses.
    """



class SynchronousError(pb.Error):
    """
    Helper used to test remote methods which raise exceptions which are
    L{pb.Error} subclasses.
    """


#class JellyError(flavors.Jellyable, pb.Error): pass
class JellyError(flavors.Jellyable, pb.Error, pb.RemoteCopy):
    """
    Helper error type which is both jellyable and copyable across PB.
    """
    pass


class SecurityError(pb.Error, pb.RemoteCopy):
    """
    Helper error type which is copyable but not explicitly jellyable.
    """
    pass


# Register both error types as their own unjellied form, and allow
# SecurityError instances through PB's global security policy.
pb.setUnjellyableForClass(JellyError, JellyError)
pb.setUnjellyableForClass(SecurityError, SecurityError)
pb.globalSecurity.allowInstancesOf(SecurityError)
####
# server-side
####
class SimpleRoot(pb.Root):
    """
    Root object exposing one remote method per failure mode under test.
    """

    def remote_asynchronousException(self):
        """
        Fail asynchronously with a non-pb.Error exception.
        """
        return defer.fail(AsynchronousException("remote asynchronous exception"))

    def remote_synchronousException(self):
        """
        Fail synchronously with a non-pb.Error exception.
        """
        raise SynchronousException("remote synchronous exception")

    def remote_asynchronousError(self):
        """
        Fail asynchronously with a pb.Error exception.
        """
        return defer.fail(AsynchronousError("remote asynchronous error"))

    def remote_synchronousError(self):
        """
        Fail synchronously with a pb.Error exception.
        """
        raise SynchronousError("remote synchronous error")

    def remote_unknownError(self):
        """
        Fail with error that is not known to client.
        """
        class UnknownError(pb.Error):
            pass
        raise UnknownError("I'm not known to client!")

    def remote_jelly(self):
        """
        Fail synchronously with a jellyable error.
        """
        self.raiseJelly()

    def remote_security(self):
        """
        Fail synchronously with a copyable (security-allowed) error.
        """
        self.raiseSecurity()

    def remote_deferredJelly(self):
        """
        Fail asynchronously with a jellyable error.
        """
        d = defer.Deferred()
        d.addCallback(self.raiseJelly)
        d.callback(None)
        return d

    def remote_deferredSecurity(self):
        """
        Fail asynchronously with a copyable (security-allowed) error.
        """
        d = defer.Deferred()
        d.addCallback(self.raiseSecurity)
        d.callback(None)
        return d

    def raiseJelly(self, results=None):
        # Shared failure helper; results is ignored so it can be a callback.
        raise JellyError("I'm jellyable!")

    def raiseSecurity(self, results=None):
        # Shared failure helper; results is ignored so it can be a callback.
        raise SecurityError("I'm secure!")
class SaveProtocolServerFactory(pb.PBServerFactory):
    """
    A L{pb.PBServerFactory} that saves the latest connected client in
    C{protocolInstance}.
    """
    # None until a client connects; set by clientConnectionMade.
    protocolInstance = None

    def clientConnectionMade(self, protocol):
        """
        Keep track of the given protocol.
        """
        self.protocolInstance = protocol
class PBConnTestCase(unittest.TestCase):
    """
    Base test case which sets up a PB server (serving L{SimpleRoot}) and a
    connected PB client over loopback TCP, and tears both down afterwards.
    """
    # Subclasses override this to enable full tracebacks over the wire.
    unsafeTracebacks = 0

    def setUp(self):
        self._setUpServer()
        self._setUpClient()

    def _setUpServer(self):
        # Port 0: let the OS pick a free port; the client reads it back.
        self.serverFactory = SaveProtocolServerFactory(SimpleRoot())
        self.serverFactory.unsafeTracebacks = self.unsafeTracebacks
        self.serverPort = reactor.listenTCP(0, self.serverFactory, interface="127.0.0.1")

    def _setUpClient(self):
        portNo = self.serverPort.getHost().port
        self.clientFactory = pb.PBClientFactory()
        self.clientConnector = reactor.connectTCP("127.0.0.1", portNo, self.clientFactory)

    def tearDown(self):
        # Drop the server side of any live connection before shutting down.
        if self.serverFactory.protocolInstance is not None:
            self.serverFactory.protocolInstance.transport.loseConnection()
        return defer.gatherResults([
            self._tearDownServer(),
            self._tearDownClient()])

    def _tearDownServer(self):
        return defer.maybeDeferred(self.serverPort.stopListening)

    def _tearDownClient(self):
        self.clientConnector.disconnect()
        return defer.succeed(None)
class PBFailureTest(PBConnTestCase):
    """
    Tests for how failures raised by remote methods are reported to the
    calling side.  C{compare} is the assertion applied to the traceback
    text: with C{unsafeTracebacks} disabled the traceback must be the
    "unavailable" placeholder (see L{PBFailureTestUnsafe} for the inverse).
    """

    compare = unittest.TestCase.assertEquals


    def _exceptionTest(self, method, exceptionType, flush):
        """
        Call C{method} on the remote root and check that it fails with
        C{exceptionType} and no traceback text; if C{flush} is true, also
        flush exactly one logged error of that type.
        """
        def eb(err):
            err.trap(exceptionType)
            self.compare(err.traceback, "Traceback unavailable\n")
            if flush:
                errs = self.flushLoggedErrors(exceptionType)
                self.assertEqual(len(errs), 1)
            return (err.type, err.value, err.traceback)
        d = self.clientFactory.getRootObject()
        def gotRootObject(root):
            d = root.callRemote(method)
            d.addErrback(eb)
            return d
        d.addCallback(gotRootObject)
        return d


    def test_asynchronousException(self):
        """
        Test that a Deferred returned by a remote method which already has a
        Failure correctly has that error passed back to the calling side.
        """
        return self._exceptionTest(
            'asynchronousException', AsynchronousException, True)


    def test_synchronousException(self):
        """
        Like L{test_asynchronousException}, but for a method which raises an
        exception synchronously.
        """
        return self._exceptionTest(
            'synchronousException', SynchronousException, True)


    def test_asynchronousError(self):
        """
        Like L{test_asynchronousException}, but for a method which returns a
        Deferred failing with an L{pb.Error} subclass.
        """
        return self._exceptionTest(
            'asynchronousError', AsynchronousError, False)


    def test_synchronousError(self):
        """
        Like L{test_asynchronousError}, but for a method which synchronously
        raises a L{pb.Error} subclass.
        """
        return self._exceptionTest(
            'synchronousError', SynchronousError, False)


    def _success(self, result, expectedResult):
        """
        Callback asserting the remote call returned C{expectedResult}.
        """
        self.assertEquals(result, expectedResult)
        return result


    def _addFailingCallbacks(self, remoteCall, expectedResult, eb):
        """
        Attach C{_success} (expecting C{expectedResult}) and the errback
        C{eb} to C{remoteCall}, returning the same Deferred.
        """
        remoteCall.addCallbacks(self._success, eb,
                                callbackArgs=(expectedResult,))
        return remoteCall


    def _testImpl(self, method, expected, eb, exc=None):
        """
        Call the given remote method and attach the given errback to the
        resulting Deferred.  If C{exc} is not None, also assert that one
        exception of that type was logged.
        """
        rootDeferred = self.clientFactory.getRootObject()
        def gotRootObj(obj):
            failureDeferred = self._addFailingCallbacks(obj.callRemote(method), expected, eb)
            if exc is not None:
                def gotFailure(err):
                    self.assertEquals(len(self.flushLoggedErrors(exc)), 1)
                    return err
                failureDeferred.addBoth(gotFailure)
            return failureDeferred
        rootDeferred.addCallback(gotRootObj)
        return rootDeferred


    def test_jellyFailure(self):
        """
        Test that an exception which is a subclass of L{pb.Error} has more
        information passed across the network to the calling side.
        """
        def failureJelly(fail):
            fail.trap(JellyError)
            self.failIf(isinstance(fail.type, str))
            self.failUnless(isinstance(fail.value, fail.type))
            return 43
        return self._testImpl('jelly', 43, failureJelly)


    def test_deferredJellyFailure(self):
        """
        Test that a Deferred which fails with a L{pb.Error} is treated in
        the same way as a synchronously raised L{pb.Error}.
        """
        def failureDeferredJelly(fail):
            fail.trap(JellyError)
            self.failIf(isinstance(fail.type, str))
            self.failUnless(isinstance(fail.value, fail.type))
            return 430
        return self._testImpl('deferredJelly', 430, failureDeferredJelly)


    def test_unjellyableFailure(self):
        """
        An non-jellyable L{pb.Error} subclass raised by a remote method is
        turned into a Failure with a type set to the FQPN of the exception
        type.
        """
        def failureUnjellyable(fail):
            self.assertEqual(
                fail.type, 'twisted.test.test_pbfailure.SynchronousError')
            return 431
        return self._testImpl('synchronousError', 431, failureUnjellyable)


    def test_unknownFailure(self):
        """
        Test that an exception which is a subclass of L{pb.Error} but not
        known on the client side has its type set properly.
        """
        def failureUnknown(fail):
            self.assertEqual(
                fail.type, 'twisted.test.test_pbfailure.UnknownError')
            return 4310
        return self._testImpl('unknownError', 4310, failureUnknown)


    def test_securityFailure(self):
        """
        Test that even if an exception is not explicitly jellyable (by being
        a L{pb.Jellyable} subclass), as long as it is an L{pb.Error}
        subclass it receives the same special treatment.
        """
        def failureSecurity(fail):
            fail.trap(SecurityError)
            self.failIf(isinstance(fail.type, str))
            self.failUnless(isinstance(fail.value, fail.type))
            return 4300
        return self._testImpl('security', 4300, failureSecurity)


    def test_deferredSecurity(self):
        """
        Test that a Deferred which fails with a L{pb.Error} which is not
        also a L{pb.Jellyable} is treated in the same way as a synchronously
        raised exception of the same type.
        """
        def failureDeferredSecurity(fail):
            fail.trap(SecurityError)
            self.failIf(isinstance(fail.type, str))
            self.failUnless(isinstance(fail.value, fail.type))
            return 43000
        return self._testImpl('deferredSecurity', 43000, failureDeferredSecurity)


    def test_noSuchMethodFailure(self):
        """
        Test that attempting to call a method which is not defined correctly
        results in an AttributeError on the calling side.
        """
        def failureNoSuch(fail):
            fail.trap(pb.NoSuchMethod)
            self.compare(fail.traceback, "Traceback unavailable\n")
            return 42000
        return self._testImpl('nosuch', 42000, failureNoSuch, AttributeError)


    def test_copiedFailureLogging(self):
        """
        Test that a copied failure received from a PB call can be logged
        locally.

        Note: this test needs some serious help: all it really tests is that
        log.err(copiedFailure) doesn't raise an exception.
        """
        d = self.clientFactory.getRootObject()

        def connected(rootObj):
            return rootObj.callRemote('synchronousException')
        d.addCallback(connected)

        def exception(failure):
            log.err(failure)
            errs = self.flushLoggedErrors(SynchronousException)
            self.assertEquals(len(errs), 2)
        d.addErrback(exception)

        return d
class PBFailureTestUnsafe(PBFailureTest):
    """
    Re-run L{PBFailureTest} with C{unsafeTracebacks} enabled: the traceback
    text must now differ from the "unavailable" placeholder.
    """
    compare = unittest.TestCase.failIfEquals
    unsafeTracebacks = 1
class DummyInvoker(object):
    """
    A behaviorless object to be used as the invoker parameter to
    L{jelly.jelly}.
    """
    # jelly only reads this attribute; no perspective is simulated.
    serializingPerspective = None
class FailureJellyingTests(unittest.TestCase):
    """
    Tests for the interaction of jelly and failures.
    """
    def test_unjelliedFailureCheck(self):
        """
        An unjellied L{CopyableFailure} has a check method which behaves the
        same way as the original L{CopyableFailure}'s check method.
        """
        original = pb.CopyableFailure(ZeroDivisionError())
        self.assertIdentical(
            original.check(ZeroDivisionError), ZeroDivisionError)
        self.assertIdentical(original.check(ArithmeticError), ArithmeticError)
        copied = jelly.unjelly(jelly.jelly(original, invoker=DummyInvoker()))
        self.assertIdentical(
            copied.check(ZeroDivisionError), ZeroDivisionError)
        self.assertIdentical(copied.check(ArithmeticError), ArithmeticError)


    def test_twiceUnjelliedFailureCheck(self):
        """
        The object which results from jellying a L{CopyableFailure}, unjellying
        the result, creating a new L{CopyableFailure} from the result of that,
        jellying it, and finally unjellying the result of that has a check
        method which behaves the same way as the original L{CopyableFailure}'s
        check method.
        """
        original = pb.CopyableFailure(ZeroDivisionError())
        self.assertIdentical(
            original.check(ZeroDivisionError), ZeroDivisionError)
        self.assertIdentical(original.check(ArithmeticError), ArithmeticError)
        # One full round trip...
        copiedOnce = jelly.unjelly(
            jelly.jelly(original, invoker=DummyInvoker()))
        derivative = pb.CopyableFailure(copiedOnce)
        # ...then a second round trip through a derived CopyableFailure.
        copiedTwice = jelly.unjelly(
            jelly.jelly(derivative, invoker=DummyInvoker()))
        self.assertIdentical(
            copiedTwice.check(ZeroDivisionError), ZeroDivisionError)
        self.assertIdentical(
            copiedTwice.check(ArithmeticError), ArithmeticError)
| agpl-3.0 |
gopal1cloud/neutron | neutron/db/migration/alembic_migrations/versions/52ff27f7567a_support_for_vpnaas.py | 3 | 7289 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Support for VPNaaS
Revision ID: 52ff27f7567a
Revises: 39cf3f799352
Create Date: 2013-07-14 23:04:13.395955
"""
# revision identifiers, used by Alembic.
revision = '52ff27f7567a'
down_revision = '39cf3f799352'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.services.vpn.plugin.VPNDriverPlugin',
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
    """Create the VPNaaS schema: IKE and IPsec policy tables, the VPN
    service table, IPsec site connections, and their peer CIDRs."""
    # Only run when the VPN driver plugin is among the active plugins.
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    # Phase-1 (IKE) negotiation parameters.
    op.create_table(
        'ikepolicies',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.Column(
            'auth_algorithm',
            sa.Enum('sha1', name='vpn_auth_algorithms'), nullable=False),
        sa.Column(
            'encryption_algorithm',
            sa.Enum('3des', 'aes-128', 'aes-256', 'aes-192',
                    name='vpn_encrypt_algorithms'), nullable=False),
        sa.Column(
            'phase1_negotiation_mode',
            sa.Enum('main', name='ike_phase1_mode'), nullable=False),
        sa.Column(
            'lifetime_units',
            sa.Enum('seconds', 'kilobytes', name='vpn_lifetime_units'),
            nullable=False),
        sa.Column('lifetime_value', sa.Integer(), nullable=False),
        sa.Column(
            'ike_version',
            sa.Enum('v1', 'v2', name='ike_versions'), nullable=False),
        sa.Column(
            'pfs',
            sa.Enum('group2', 'group5', 'group14', name='vpn_pfs'),
            nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    # Phase-2 (IPsec) transform parameters.
    op.create_table(
        'ipsecpolicies',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.Column(
            'transform_protocol',
            sa.Enum('esp', 'ah', 'ah-esp', name='ipsec_transform_protocols'),
            nullable=False),
        sa.Column(
            'auth_algorithm',
            sa.Enum('sha1', name='vpn_auth_algorithms'), nullable=False),
        sa.Column(
            'encryption_algorithm',
            sa.Enum(
                '3des', 'aes-128',
                'aes-256', 'aes-192', name='vpn_encrypt_algorithms'),
            nullable=False),
        sa.Column(
            'encapsulation_mode',
            sa.Enum('tunnel', 'transport', name='ipsec_encapsulations'),
            nullable=False),
        sa.Column(
            'lifetime_units',
            sa.Enum(
                'seconds', 'kilobytes',
                name='vpn_lifetime_units'), nullable=False),
        sa.Column(
            'lifetime_value', sa.Integer(), nullable=False),
        sa.Column(
            'pfs',
            sa.Enum(
                'group2', 'group5', 'group14', name='vpn_pfs'),
            nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    # A VPN service anchored to an existing router and subnet.
    op.create_table(
        'vpnservices',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.Column('status', sa.String(length=16), nullable=False),
        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
        sa.Column('subnet_id', sa.String(length=36), nullable=False),
        sa.Column('router_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ),
        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # A site-to-site connection tying a service to one IKE and one IPsec
    # policy, including dead-peer-detection (dpd_*) settings.
    op.create_table(
        'ipsec_site_connections',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.Column('peer_address', sa.String(length=64), nullable=False),
        sa.Column('peer_id', sa.String(length=255), nullable=False),
        sa.Column('route_mode', sa.String(length=8), nullable=False),
        sa.Column('mtu', sa.Integer(), nullable=False),
        sa.Column(
            'initiator',
            sa.Enum(
                'bi-directional', 'response-only', name='vpn_initiators'),
            nullable=False),
        sa.Column('auth_mode', sa.String(length=16), nullable=False),
        sa.Column('psk', sa.String(length=255), nullable=False),
        sa.Column(
            'dpd_action',
            sa.Enum(
                'hold', 'clear', 'restart',
                'disabled', 'restart-by-peer', name='vpn_dpd_actions'),
            nullable=False),
        sa.Column('dpd_interval', sa.Integer(), nullable=False),
        sa.Column('dpd_timeout', sa.Integer(), nullable=False),
        sa.Column('status', sa.String(length=16), nullable=False),
        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
        sa.Column('vpnservice_id', sa.String(length=36), nullable=False),
        sa.Column('ipsecpolicy_id', sa.String(length=36), nullable=False),
        sa.Column('ikepolicy_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['ikepolicy_id'], ['ikepolicies.id']),
        sa.ForeignKeyConstraint(['ipsecpolicy_id'], ['ipsecpolicies.id']),
        sa.ForeignKeyConstraint(['vpnservice_id'], ['vpnservices.id']),
        sa.PrimaryKeyConstraint('id')
    )
    # Remote CIDRs reachable through a connection; rows die with their
    # connection (ON DELETE CASCADE).
    op.create_table(
        'ipsecpeercidrs',
        sa.Column('cidr', sa.String(length=32), nullable=False),
        sa.Column('ipsec_site_connection_id',
                  sa.String(length=36),
                  nullable=False),
        sa.ForeignKeyConstraint(['ipsec_site_connection_id'],
                                ['ipsec_site_connections.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('cidr', 'ipsec_site_connection_id')
    )
def downgrade(active_plugins=None, options=None):
    """Drop the VPNaaS tables, children before parents so the foreign-key
    constraints are satisfied."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    # Exact reverse of the creation order in upgrade().
    for table_name in ('ipsecpeercidrs',
                       'ipsec_site_connections',
                       'vpnservices',
                       'ipsecpolicies',
                       'ikepolicies'):
        op.drop_table(table_name)
| apache-2.0 |
cherez/youtube-dl | youtube_dl/extractor/macgamestore.py | 142 | 1275 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import ExtractorError
class MacGameStoreIE(InfoExtractor):
    """Extracts trailer videos from MacGameStore media-viewer pages."""
    IE_NAME = 'macgamestore'
    IE_DESC = 'MacGameStore trailers'
    _VALID_URL = r'https?://www\.macgamestore\.com/mediaviewer\.php\?trailer=(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.macgamestore.com/mediaviewer.php?trailer=2450',
        'md5': '8649b8ea684b6666b4c5be736ecddc61',
        'info_dict': {
            'id': '2450',
            'ext': 'm4v',
            'title': 'Crow',
        }
    }

    def _real_extract(self, url):
        """Download the viewer page, then scrape the title and media URL."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            url, video_id, 'Downloading trailer page')

        # Removed trailers still return a page, marked with this text; treat
        # that as an expected failure rather than an extractor bug.
        if '>Missing Media<' in webpage:
            raise ExtractorError(
                'Trailer %s does not exist' % video_id, expected=True)

        video_title = self._html_search_regex(
            r'<title>MacGameStore: (.*?) Trailer</title>', webpage, 'title')

        video_url = self._html_search_regex(
            r'(?s)<div\s+id="video-player".*?href="([^"]+)"\s*>',
            webpage, 'video URL')

        return {
            'id': video_id,
            'url': video_url,
            'title': video_title
        }
| unlicense |
tszym/ansible | lib/ansible/modules/notification/slack.py | 8 | 10761 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Steve Pletcher <steve@steve-pletcher.com>
# (c) 2016, René Moser <mail@renemoser.net>
# (c) 2015, Stefan Berggren <nsg@nsg.cc>
# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = """
module: slack
short_description: Send Slack notifications
description:
- The C(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration
version_added: "1.6"
author: "Ramon de la Fuente (@ramondelafuente)"
options:
domain:
description:
- Slack (sub)domain for your environment without protocol. (i.e.
C(example.slack.com)) In 1.8 and beyond, this is deprecated and may
be ignored. See token documentation for information.
required: false
default: None
token:
description:
- Slack integration token. This authenticates you to the slack service.
Prior to 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK). In
1.8 and above, ansible adapts to the new slack API where tokens look
like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens
are in the new format then slack will ignore any value of domain. If
the token is in the old format the domain is required. Ansible has no
control of when slack will get rid of the old API. When slack does
that the old format will stop working. ** Please keep in mind the tokens
are not the API tokens but are the webhook tokens. In slack these are
found in the webhook URL which are obtained under the apps and integrations.
The incoming webhooks can be added in that area. In some cases this may
be locked by your Slack admin and you must request access. It is there
that the incoming webhooks can be added. The key is on the end of the
URL given to you in that section.
required: true
msg:
description:
- Message to send. Note that the module does not handle escaping characters.
        Plain-text angle brackets and ampersands should be converted to HTML entities (e.g. & to &amp;) before sending.
See Slack's documentation (U(https://api.slack.com/docs/message-formatting)) for more.
required: false
default: None
channel:
description:
- Channel to send the message to. If absent, the message goes to the channel selected for the I(token).
required: false
default: None
username:
description:
- This is the sender of the message.
required: false
default: "Ansible"
icon_url:
description:
- Url for the message sender's icon (default C(https://www.ansible.com/favicon.ico))
required: false
icon_emoji:
description:
- Emoji for the message sender. See Slack documentation for options.
(if I(icon_emoji) is set, I(icon_url) will not be used)
required: false
default: None
link_names:
description:
- Automatically create links for channels and usernames in I(msg).
required: false
default: 1
choices:
- 1
- 0
parse:
description:
- Setting for the message parser at Slack
required: false
default: None
choices:
- 'full'
- 'none'
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices:
- 'yes'
- 'no'
color:
version_added: "2.0"
description:
- Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message
required: false
default: 'normal'
choices:
- 'normal'
- 'good'
- 'warning'
- 'danger'
attachments:
description:
- Define a list of attachments. This list mirrors the Slack JSON API. For more information, see https://api.slack.com/docs/attachments
required: false
default: None
"""
EXAMPLES = """
- name: Send notification message via Slack
slack:
token: thetoken/generatedby/slack
msg: '{{ inventory_hostname }} completed'
delegate_to: localhost
- name: Send notification message via Slack all options
slack:
token: thetoken/generatedby/slack
msg: '{{ inventory_hostname }} completed'
    channel: '#ansible'
username: 'Ansible on {{ inventory_hostname }}'
icon_url: http://www.example.com/some-image-file.png
link_names: 0
parse: 'none'
delegate_to: localhost
- name: insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack
slack:
token: thetoken/generatedby/slack
msg: '{{ inventory_hostname }} is alive!'
color: good
username: ''
icon_url: ''
- name: Use the attachments API
slack:
token: thetoken/generatedby/slack
attachments:
- text: Display my system load on host A and B
        color: '#ff00dd'
title: System load
fields:
- title: System A
value: "load average: 0,74, 0,66, 0,63"
short: True
- title: System B
value: 'load average: 5,16, 4,64, 2,43'
short: True
- name: Send a message with a link using Slack markup
slack:
token: thetoken/generatedby/slack
msg: We sent this message using <https://www.ansible.com|Ansible>!
- name: Send a message with angle brackets and ampersands
slack:
token: thetoken/generatedby/slack
msg: This message has <brackets> & ampersands in plain text.
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s'
SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s'
# Escaping quotes and apostrophes to avoid ending string prematurely in ansible call.
# We do not escape other characters used as Slack metacharacters (e.g. &, <, >).
# NOTE(review): in Python source, "\"" and "\'" are just '"' and "'" -- the
# backslash is consumed by the parser.  This table therefore maps each quote
# to itself, and escape_quotes() is effectively a no-op, contradicting its
# docstring.  If backslash-escaping is really intended the values should be
# '\\"' and "\\'"; confirm against how the JSON payload is consumed before
# changing, since jsonify() already handles quoting.
escape_table = {
    '"': "\"",
    "'": "\'",
}

def escape_quotes(text):
    '''Backslash any quotes within text.'''
    return "".join(escape_table.get(c,c) for c in text)
def build_payload_for_slack(module, text, channel, username, icon_url,
                            icon_emoji, link_names, parse, color, attachments):
    """
    Build the JSON document POSTed to the Slack incoming-webhook endpoint.

    :param module: AnsibleModule instance; only its ``jsonify()`` is used.
    :param text: plain message text, or None when only attachments are sent.
    :param channel: target channel/user; bare names are prefixed with '#'.
    :param username: sender name shown by Slack, or None to omit.
    :param icon_url: sender icon URL (ignored when icon_emoji is given).
    :param icon_emoji: sender emoji; takes precedence over icon_url.
    :param link_names: Slack link_names flag (0/1), or None to omit.
    :param parse: Slack parse mode ('none'/'full'), or None to omit.
    :param color: 'normal' for a plain message, otherwise an attachment
        color bar is used.
    :param attachments: list of Slack attachment dicts, or None.
    :returns: JSON string ready to be used as the request body.
    """
    payload = {}
    if text is not None:
        if color == "normal":
            payload = dict(text=escape_quotes(text))
        else:
            # With a custom color we have to set the message as an attachment,
            # and explicitly turn markdown parsing on for it.
            payload = dict(attachments=[dict(text=escape_quotes(text),
                                             color=color,
                                             mrkdwn_in=["text"])])
    if channel is not None:
        # Channel ('#') and direct-message ('@') prefixes pass through
        # untouched; bare names get a '#' prefix.
        if (channel[0] == '#') or (channel[0] == '@'):
            payload['channel'] = channel
        else:
            payload['channel'] = '#' + channel
    if username is not None:
        payload['username'] = username
    if icon_emoji is not None:
        payload['icon_emoji'] = icon_emoji
    else:
        payload['icon_url'] = icon_url
    if link_names is not None:
        payload['link_names'] = link_names
    if parse is not None:
        payload['parse'] = parse

    if attachments is not None:
        # The custom-color branch above may already have created the list.
        payload.setdefault('attachments', [])
        # Attachment values that may contain quotes needing escaping.
        keys_to_escape = [
            'title',
            'text',
            'author_name',
            'pretext',
            'fallback',
        ]
        for attachment in attachments:
            for key in keys_to_escape:
                if key in attachment:
                    attachment[key] = escape_quotes(attachment[key])

            if 'fallback' not in attachment:
                # Slack wants a plain-text fallback.  Default to the
                # attachment text, or an empty string when the attachment has
                # no text (the previous code raised KeyError in that case).
                attachment['fallback'] = attachment.get('text', '')

            payload['attachments'].append(attachment)

    return module.jsonify(payload)
def do_notify_slack(module, domain, token, payload):
    """POST `payload` to the Slack incoming-webhook URL derived from `token`.

    New-style tokens (XXXX/YYYY/ZZZZ -- two or more '/') embed the full
    webhook path and ignore `domain`; old-style tokens require `domain`.
    Fails the module on any non-200 response.
    """
    if token.count('/') >= 2:
        # New style token
        slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (token)
    else:
        if not domain:
            module.fail_json(msg="Slack has updated its webhook API. You need to specify a token of the form XXXX/YYYY/ZZZZ in your playbook")
        slack_incoming_webhook = OLD_SLACK_INCOMING_WEBHOOK % (domain, token)

    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
    }
    response, info = fetch_url(module=module, url=slack_incoming_webhook, headers=headers, method='POST', data=payload)

    if info['status'] != 200:
        # Don't leak the secret token into the error message.
        obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]')
        module.fail_json(msg=" failed to send %s to %s: %s" % (payload, obscured_incoming_webhook, info['msg']))
def main():
    """Module entry point: declare the parameter spec, build the Slack
    payload from the parameters and post it to the incoming webhook."""
    module = AnsibleModule(
        argument_spec = dict(
            domain = dict(type='str', required=False, default=None),
            # no_log keeps the webhook token out of logs/output.
            token = dict(type='str', required=True, no_log=True),
            msg = dict(type='str', required=False, default=None),
            channel = dict(type='str', default=None),
            username = dict(type='str', default='Ansible'),
            icon_url = dict(type='str', default='https://www.ansible.com/favicon.ico'),
            icon_emoji = dict(type='str', default=None),
            link_names = dict(type='int', default=1, choices=[0,1]),
            parse = dict(type='str', default=None, choices=['none', 'full']),
            validate_certs = dict(default='yes', type='bool'),
            color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']),
            attachments = dict(type='list', required=False, default=None)
        )
    )

    domain = module.params['domain']
    token = module.params['token']
    text = module.params['msg']
    channel = module.params['channel']
    username = module.params['username']
    icon_url = module.params['icon_url']
    icon_emoji = module.params['icon_emoji']
    link_names = module.params['link_names']
    parse = module.params['parse']
    color = module.params['color']
    attachments = module.params['attachments']

    payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color, attachments)
    do_notify_slack(module, domain, token, payload)

    module.exit_json(msg="OK")

if __name__ == '__main__':
    main()
| gpl-3.0 |
cuongthai/cuongthai-s-blog | docutils/parsers/rst/states.py | 2 | 132278 | # $Id: states.py 7072 2011-07-06 15:52:30Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This is the ``docutils.parsers.rst.states`` module, the core of
the reStructuredText parser. It defines the following:
:Classes:
- `RSTStateMachine`: reStructuredText parser's entry point.
- `NestedStateMachine`: recursive StateMachine.
- `RSTState`: reStructuredText State superclass.
- `Inliner`: For parsing inline markup.
- `Body`: Generic classifier of the first line of a block.
- `SpecializedBody`: Superclass for compound element members.
- `BulletList`: Second and subsequent bullet_list list_items
- `DefinitionList`: Second+ definition_list_items.
- `EnumeratedList`: Second+ enumerated_list list_items.
- `FieldList`: Second+ fields.
- `OptionList`: Second+ option_list_items.
- `RFC2822List`: Second+ RFC2822-style fields.
- `ExtensionOptions`: Parses directive option fields.
- `Explicit`: Second+ explicit markup constructs.
- `SubstitutionDef`: For embedded directives in substitution definitions.
- `Text`: Classifier of second line of a text block.
- `SpecializedText`: Superclass for continuation lines of Text-variants.
- `Definition`: Second line of potential definition_list_item.
- `Line`: Second line of overlined section title or transition marker.
- `Struct`: An auxiliary collection class.
:Exception classes:
- `MarkupError`
- `ParserError`
- `MarkupMismatch`
:Functions:
- `escape2null()`: Return a string, escape-backslashes converted to nulls.
- `unescape()`: Return a string, nulls removed or restored to backslashes.
:Attributes:
- `state_classes`: set of State classes used with `RSTStateMachine`.
Parser Overview
===============
The reStructuredText parser is implemented as a recursive state machine,
examining its input one line at a time. To understand how the parser works,
please first become familiar with the `docutils.statemachine` module. In the
description below, references are made to classes defined in this module;
please see the individual classes for details.
Parsing proceeds as follows:
1. The state machine examines each line of input, checking each of the
transition patterns of the state `Body`, in order, looking for a match.
The implicit transitions (blank lines and indentation) are checked before
any others. The 'text' transition is a catch-all (matches anything).
2. The method associated with the matched transition pattern is called.
A. Some transition methods are self-contained, appending elements to the
document tree (`Body.doctest` parses a doctest block). The parser's
current line index is advanced to the end of the element, and parsing
continues with step 1.
B. Other transition methods trigger the creation of a nested state machine,
whose job is to parse a compound construct ('indent' does a block quote,
'bullet' does a bullet list, 'overline' does a section [first checking
for a valid section header], etc.).
- In the case of lists and explicit markup, a one-off state machine is
created and run to parse contents of the first item.
- A new state machine is created and its initial state is set to the
appropriate specialized state (`BulletList` in the case of the
'bullet' transition; see `SpecializedBody` for more detail). This
state machine is run to parse the compound element (or series of
explicit markup elements), and returns as soon as a non-member element
is encountered. For example, the `BulletList` state machine ends as
soon as it encounters an element which is not a list item of that
bullet list. The optional omission of inter-element blank lines is
enabled by this nested state machine.
- The current line index is advanced to the end of the elements parsed,
and parsing continues with step 1.
C. The result of the 'text' transition depends on the next line of text.
The current state is changed to `Text`, under which the second line is
examined. If the second line is:
- Indented: The element is a definition list item, and parsing proceeds
similarly to step 2.B, using the `DefinitionList` state.
- A line of uniform punctuation characters: The element is a section
header; again, parsing proceeds as in step 2.B, and `Body` is still
used.
- Anything else: The element is a paragraph, which is examined for
inline markup and appended to the parent element. Processing
continues with step 1.
"""
__docformat__ = 'reStructuredText'
import sys
import re
import roman
from types import FunctionType, MethodType
from docutils import nodes, statemachine, utils, urischemes
from docutils import ApplicationError, DataError
from docutils.statemachine import StateMachineWS, StateWS
from docutils.nodes import fully_normalize_name as normalize_name
from docutils.nodes import whitespace_normalize_name
from docutils.utils import escape2null, unescape, column_width
import docutils.parsers.rst
from docutils.parsers.rst import directives, languages, tableparser, roles
from docutils.parsers.rst.languages import en as _fallback_language_module
# Exception hierarchy for the parser: the DataError subclasses report
# problems with the input document; ParserError (an ApplicationError)
# reports internal failures.  MarkupMismatch subclasses plain Exception --
# presumably an internal control-flow signal rather than a user-visible
# error; confirm at its use sites.
class MarkupError(DataError): pass
class UnknownInterpretedRoleError(DataError): pass
class InterpretedRoleNotImplementedError(DataError): pass
class ParserError(ApplicationError): pass
class MarkupMismatch(Exception): pass
class Struct:

    """Attribute bag: every keyword argument passed to the constructor
    becomes an instance attribute, enabling dotted access to the data."""

    def __init__(self, **attributes):
        vars(self).update(attributes)
class RSTStateMachine(StateMachineWS):

    """
    reStructuredText's master StateMachine.

    The entry point to reStructuredText parsing is the `run()` method.
    """

    def run(self, input_lines, document, input_offset=0, match_titles=1,
            inliner=None):
        """
        Parse `input_lines` and modify the `document` node in place.

        Extend `StateMachineWS.run()`: set up parse-global data and
        run the StateMachine.
        """
        self.language = languages.get_language(
            document.settings.language_code)
        self.match_titles = match_titles
        if inliner is None:
            inliner = Inliner()
        inliner.init_customizations(document.settings)
        # `memo` is the parse-global state shared with every nested
        # state-machine run (see NestedStateMachine / RSTState).
        self.memo = Struct(document=document,
                           reporter=document.reporter,
                           language=self.language,
                           title_styles=[],
                           section_level=0,
                           section_bubble_up_kludge=0,
                           inliner=inliner)
        self.document = document
        self.attach_observer(document.note_source)
        self.reporter = self.memo.reporter
        self.node = document
        results = StateMachineWS.run(self, input_lines, input_offset,
                                     input_source=document['source'])
        assert results == [], 'RSTStateMachine.run() results should be empty!'
        # remove unneeded references so the machine can be reused/collected
        self.node = self.memo = None
class NestedStateMachine(StateMachineWS):

    """
    StateMachine run from within other StateMachine runs, to parse nested
    document structures.
    """

    def run(self, input_lines, input_offset, memo, node, match_titles=1):
        """
        Parse `input_lines` and populate a `docutils.nodes.document` instance.

        Extend `StateMachineWS.run()`: set up document-wide data.
        """
        self.match_titles = match_titles
        # Share the parse-global memo created by RSTStateMachine.run().
        self.memo = memo
        self.document = memo.document
        self.attach_observer(self.document.note_source)
        self.reporter = memo.reporter
        self.language = memo.language
        self.node = node
        results = StateMachineWS.run(self, input_lines, input_offset)
        assert results == [], ('NestedStateMachine.run() results should be '
                               'empty!')
        return results
class RSTState(StateWS):

    """
    reStructuredText State superclass.

    Contains methods used by all State subclasses.
    """

    nested_sm = NestedStateMachine
    # Cache of reusable fully-default nested state machines (shared by all
    # RSTState instances).
    nested_sm_cache = []

    def __init__(self, state_machine, debug=0):
        # `state_classes` is a module-level list defined later in this file.
        self.nested_sm_kwargs = {'state_classes': state_classes,
                                 'initial_state': 'Body'}
        StateWS.__init__(self, state_machine, debug)

    def runtime_init(self):
        """Cache frequently-used references from the shared parse memo."""
        StateWS.runtime_init(self)
        memo = self.state_machine.memo
        self.memo = memo
        self.reporter = memo.reporter
        self.inliner = memo.inliner
        self.document = memo.document
        self.parent = self.state_machine.node
        # enable the reporter to determine source and source-line
        if not hasattr(self.reporter, 'locator'):
            self.reporter.locator = self.state_machine.get_source_and_line
            # print "adding locator to reporter", self.state_machine.input_offset

    def goto_line(self, abs_line_offset):
        """
        Jump to input line `abs_line_offset`, ignoring jumps past the end.
        """
        try:
            self.state_machine.goto_line(abs_line_offset)
        except EOFError:
            pass

    def no_match(self, context, transitions):
        """
        Override `StateWS.no_match` to generate a system message.

        This code should never be run.
        """
        src, srcline = self.state_machine.get_source_and_line()
        self.reporter.severe(
            'Internal error: no transition pattern match. State: "%s"; '
            'transitions: %s; context: %s; current line: %r.'
            % (self.__class__.__name__, transitions, context,
               self.state_machine.line),
            source=src, line=srcline)
        return context, None, []

    def bof(self, context):
        """Called at beginning of file."""
        return [], []

    def nested_parse(self, block, input_offset, node, match_titles=0,
                     state_machine_class=None, state_machine_kwargs=None):
        """
        Create a new StateMachine rooted at `node` and run it over the input
        `block`.
        """
        # Count how many of the machine class/kwargs are defaulted; only a
        # fully-default machine (use_default == 2) is safe to cache & reuse.
        use_default = 0
        if state_machine_class is None:
            state_machine_class = self.nested_sm
            use_default += 1
        if state_machine_kwargs is None:
            state_machine_kwargs = self.nested_sm_kwargs
            use_default += 1
        block_length = len(block)

        state_machine = None
        if use_default == 2:
            try:
                state_machine = self.nested_sm_cache.pop()
            except IndexError:
                pass
        if not state_machine:
            state_machine = state_machine_class(debug=self.debug,
                                                **state_machine_kwargs)
        state_machine.run(block, input_offset, memo=self.memo,
                          node=node, match_titles=match_titles)
        if use_default == 2:
            self.nested_sm_cache.append(state_machine)
        else:
            state_machine.unlink()
        new_offset = state_machine.abs_line_offset()
        # No `block.parent` implies disconnected -- lines aren't in sync:
        if block.parent and (len(block) - block_length) != 0:
            # Adjustment for block if modified in nested parse:
            self.state_machine.next_line(len(block) - block_length)
        return new_offset

    def nested_list_parse(self, block, input_offset, node, initial_state,
                          blank_finish,
                          blank_finish_state=None,
                          extra_settings={},
                          match_titles=0,
                          state_machine_class=None,
                          state_machine_kwargs=None):
        """
        Create a new StateMachine rooted at `node` and run it over the input
        `block`. Also keep track of optional intermediate blank lines and the
        required final one.
        """
        if state_machine_class is None:
            state_machine_class = self.nested_sm
        if state_machine_kwargs is None:
            state_machine_kwargs = self.nested_sm_kwargs.copy()
        state_machine_kwargs['initial_state'] = initial_state
        state_machine = state_machine_class(debug=self.debug,
                                            **state_machine_kwargs)
        if blank_finish_state is None:
            blank_finish_state = initial_state
        state_machine.states[blank_finish_state].blank_finish = blank_finish
        for key, value in extra_settings.items():
            setattr(state_machine.states[initial_state], key, value)
        state_machine.run(block, input_offset, memo=self.memo,
                          node=node, match_titles=match_titles)
        blank_finish = state_machine.states[blank_finish_state].blank_finish
        state_machine.unlink()
        return state_machine.abs_line_offset(), blank_finish

    def section(self, title, source, style, lineno, messages):
        """Check for a valid subsection and create one if it checks out."""
        if self.check_subsection(source, style, lineno):
            self.new_subsection(title, lineno, messages)

    def check_subsection(self, source, style, lineno):
        """
        Check for a valid subsection header. Return 1 (true) or None (false).

        When a new section is reached that isn't a subsection of the current
        section, back up the line count (use ``previous_line(-x)``), then
        ``raise EOFError``. The current StateMachine will finish, then the
        calling StateMachine can re-examine the title. This will work its way
        back up the calling chain until the correct section level is reached.

        @@@ Alternative: Evaluate the title, store the title info & level, and
        back up the chain until that level is reached. Store in memo? Or
        return in results?

        :Exception: `EOFError` when a sibling or supersection encountered.
        """
        memo = self.memo
        title_styles = memo.title_styles
        mylevel = memo.section_level
        try:                            # check for existing title style
            level = title_styles.index(style) + 1
        except ValueError:              # new title style
            if len(title_styles) == memo.section_level:  # new subsection
                title_styles.append(style)
                return 1
            else:                       # not at lowest level
                self.parent += self.title_inconsistent(source, lineno)
                return None
        if level <= mylevel:            # sibling or supersection
            memo.section_level = level  # bubble up to parent section
            if len(style) == 2:
                memo.section_bubble_up_kludge = 1
            # back up 2 lines for underline title, 3 for overline title
            self.state_machine.previous_line(len(style) + 1)
            raise EOFError              # let parent section re-evaluate
        if level == mylevel + 1:        # immediate subsection
            return 1
        else:                           # invalid subsection
            self.parent += self.title_inconsistent(source, lineno)
            return None

    def title_inconsistent(self, sourcetext, lineno):
        """Return a severe system_message reporting an inconsistent title."""
        src, srcline = self.state_machine.get_source_and_line(lineno)
        error = self.reporter.severe(
            'Title level inconsistent:', nodes.literal_block('', sourcetext),
            source=src, line=srcline)
        return error

    def new_subsection(self, title, lineno, messages):
        """Append new subsection to document tree. On return, check level."""
        memo = self.memo
        mylevel = memo.section_level
        memo.section_level += 1
        section_node = nodes.section()
        self.parent += section_node
        textnodes, title_messages = self.inline_text(title, lineno)
        titlenode = nodes.title(title, '', *textnodes)
        name = normalize_name(titlenode.astext())
        section_node['names'].append(name)
        section_node += titlenode
        section_node += messages
        section_node += title_messages
        self.document.note_implicit_target(section_node, section_node)
        # Parse the remaining input as the body of the new section.
        offset = self.state_machine.line_offset + 1
        absoffset = self.state_machine.abs_line_offset() + 1
        newabsoffset = self.nested_parse(
            self.state_machine.input_lines[offset:], input_offset=absoffset,
            node=section_node, match_titles=1)
        self.goto_line(newabsoffset)
        if memo.section_level <= mylevel:  # can't handle next section?
            raise EOFError              # bubble up to supersection
        # reset section_level; next pass will detect it properly
        memo.section_level = mylevel

    def paragraph(self, lines, lineno):
        """
        Return a list (paragraph & messages) & a boolean: literal_block next?
        """
        data = '\n'.join(lines).rstrip()
        # A paragraph ending in an unescaped "::" announces a literal block.
        if re.search(r'(?<!\\)(\\\\)*::$', data):
            if len(data) == 2:
                return [], 1
            elif data[-3] in ' \n':
                text = data[:-3].rstrip()
            else:
                text = data[:-1]
            literalnext = 1
        else:
            text = data
            literalnext = 0
        textnodes, messages = self.inline_text(text, lineno)
        p = nodes.paragraph(data, '', *textnodes)
        p.source, p.line = self.state_machine.get_source_and_line(lineno)
        return [p] + messages, literalnext

    def inline_text(self, text, lineno):
        """
        Return 2 lists: nodes (text and inline elements), and system_messages.
        """
        return self.inliner.parse(text, lineno, self.memo, self.parent)

    def unindent_warning(self, node_name):
        """Return a warning system_message about an unexpected unindent."""
        # the actual problem is one line below the current line
        src, srcline = self.state_machine.get_source_and_line()
        return self.reporter.warning('%s ends without a blank line; '
                                     'unexpected unindent.' % node_name,
                                     source=src, line=srcline+1)
def build_regexp(definition, compile=1):
    """
    Build, compile and return a regular expression based on `definition`.

    :Parameter: `definition`: a 4-tuple (group name, prefix, suffix, parts),
        where "parts" is a list of regular expressions and/or regular
        expression definitions (nested 4-tuples) to be joined into an
        or-group.

    If `compile` is true, return a compiled pattern (with ``re.UNICODE``);
    otherwise return the pattern string.
    """
    name, prefix, suffix, parts = definition
    # Nested tuple parts are expanded recursively (uncompiled).
    part_strings = [build_regexp(part, None) if type(part) is tuple else part
                    for part in parts]
    or_group = '|'.join(part_strings)
    regexp = '%s(?P<%s>%s)%s' % (prefix, name, or_group, suffix)
    if compile:
        return re.compile(regexp, re.UNICODE)
    return regexp
class Inliner:
    """
    Parse inline markup; call the `parse()` method.

    Recognizes emphasis, strong, literals, interpreted text, phrase and
    simple references, footnote/citation references, substitution
    references, inline targets, and (implicitly) standalone URIs and
    PEP/RFC references.
    """
    def __init__(self):
        self.implicit_dispatch = [(self.patterns.uri, self.standalone_uri),]
        """List of (pattern, bound method) tuples, used by
        `self.implicit_inline`."""
    def init_customizations(self, settings):
        """Setting-based customizations; run when parsing begins."""
        if settings.pep_references:
            self.implicit_dispatch.append((self.patterns.pep,
                                           self.pep_reference))
        if settings.rfc_references:
            self.implicit_dispatch.append((self.patterns.rfc,
                                           self.rfc_reference))
    def parse(self, text, lineno, memo, parent):
        # Needs to be refactored for nested inline markup.
        # Add nested_parse() method?
        """
        Return 2 lists: nodes (text and inline elements), and system_messages.

        Using `self.patterns.initial`, a pattern which matches start-strings
        (emphasis, strong, interpreted, phrase reference, literal,
        substitution reference, and inline target) and complete constructs
        (simple reference, footnote reference), search for a candidate. When
        one is found, check for validity (e.g., not a quoted '*' character).
        If valid, search for the corresponding end string if applicable, and
        check it for validity. If not found or invalid, generate a warning
        and ignore the start-string. Implicit inline markup (e.g. standalone
        URIs) is found last.
        """
        self.reporter = memo.reporter
        self.document = memo.document
        self.language = memo.language
        self.parent = parent
        pattern_search = self.patterns.initial.search
        dispatch = self.dispatch
        # Backslash-escapes are converted to nulls so the patterns
        # cannot match escaped markup characters.
        remaining = escape2null(text)
        processed = []
        unprocessed = []
        messages = []
        while remaining:
            match = pattern_search(remaining)
            if match:
                groups = match.groupdict()
                # Exactly one of these groups matched; it selects the
                # handler method (see `dispatch` at the end of the class).
                method = dispatch[groups['start'] or groups['backquote']
                                  or groups['refend'] or groups['fnend']]
                before, inlines, remaining, sysmessages = method(self, match,
                                                                 lineno)
                unprocessed.append(before)
                messages += sysmessages
                if inlines:
                    processed += self.implicit_inline(''.join(unprocessed),
                                                      lineno)
                    processed += inlines
                    unprocessed = []
            else:
                break
        remaining = ''.join(unprocessed) + remaining
        if remaining:
            processed += self.implicit_inline(remaining, lineno)
        return processed, messages
    openers = u'\'"([{<\u2018\u201c\xab\u00a1\u00bf' # see quoted_start below
    closers = u'\'")]}>\u2019\u201d\xbb!?'
    unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0'
    start_string_prefix = (u'((?<=^)|(?<=[-/: \\n\u2019%s%s]))'
                           % (re.escape(unicode_delimiters),
                              re.escape(openers)))
    end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
                         % (re.escape(unicode_delimiters),
                            re.escape(closers)))
    non_whitespace_before = r'(?<![ \n])'
    non_whitespace_escape_before = r'(?<![ \n\x00])'
    non_unescaped_whitespace_escape_before = r'(?<!(?<!\x00)[ \n\x00])'
    non_whitespace_after = r'(?![ \n])'
    # Alphanumerics with isolated internal [-._+:] chars (i.e. not 2 together):
    simplename = r'(?:(?!_)\w)+(?:[-._+:](?:(?!_)\w)+)*'
    # Valid URI characters (see RFC 2396 & RFC 2732);
    # final \x00 allows backslash escapes in URIs:
    uric = r"""[-_.!~*'()[\];/:@&=+$,%a-zA-Z0-9\x00]"""
    # Delimiter indicating the end of a URI (not part of the URI):
    uri_end_delim = r"""[>]"""
    # Last URI character; same as uric but no punctuation:
    urilast = r"""[_~*/=+a-zA-Z0-9]"""
    # End of a URI (either 'urilast' or 'uric followed by a
    # uri_end_delim'):
    uri_end = r"""(?:%(urilast)s|%(uric)s(?=%(uri_end_delim)s))""" % locals()
    emailc = r"""[-_!~*'{|}/#?^`&=+$%a-zA-Z0-9\x00]"""
    email_pattern = r"""
          %(emailc)s+(?:\.%(emailc)s+)* # name
          (?<!\x00)@ # at
          %(emailc)s+(?:\.%(emailc)s*)* # host
          %(uri_end)s # final URI char
          """
    # Definition tree for the initial dispatch pattern; see `build_regexp`.
    parts = ('initial_inline', start_string_prefix, '',
             [('start', '', non_whitespace_after, # simple start-strings
               [r'\*\*', # strong
                r'\*(?!\*)', # emphasis but not strong
                r'``', # literal
                r'_`', # inline internal target
                r'\|(?!\|)'] # substitution reference
               ),
              ('whole', '', end_string_suffix, # whole constructs
               [# reference name & end-string
                r'(?P<refname>%s)(?P<refend>__?)' % simplename,
                ('footnotelabel', r'\[', r'(?P<fnend>\]_)',
                 [r'[0-9]+', # manually numbered
                  r'\#(%s)?' % simplename, # auto-numbered (w/ label?)
                  r'\*', # auto-symbol
                  r'(?P<citationlabel>%s)' % simplename] # citation reference
                 )
                ]
               ),
              ('backquote', # interpreted text or phrase reference
               '(?P<role>(:%s:)?)' % simplename, # optional role
               non_whitespace_after,
               ['`(?!`)'] # but not literal
               )
              ]
             )
    patterns = Struct(
          initial=build_regexp(parts),
          emphasis=re.compile(non_whitespace_escape_before
                              + r'(\*)' + end_string_suffix),
          strong=re.compile(non_whitespace_escape_before
                            + r'(\*\*)' + end_string_suffix),
          interpreted_or_phrase_ref=re.compile(
              r"""
              %(non_unescaped_whitespace_escape_before)s
              (
                `
                (?P<suffix>
                  (?P<role>:%(simplename)s:)?
                  (?P<refend>__?)?
                )
              )
              %(end_string_suffix)s
              """ % locals(), re.VERBOSE | re.UNICODE),
          embedded_uri=re.compile(
              r"""
              (
                (?:[ \n]+|^) # spaces or beginning of line/string
                < # open bracket
                %(non_whitespace_after)s
                ([^<>\x00]+) # anything but angle brackets & nulls
                %(non_whitespace_before)s
                > # close bracket w/o whitespace before
              )
              $ # end of string
              """ % locals(), re.VERBOSE),
          literal=re.compile(non_whitespace_before + '(``)'
                             + end_string_suffix),
          target=re.compile(non_whitespace_escape_before
                            + r'(`)' + end_string_suffix),
          substitution_ref=re.compile(non_whitespace_escape_before
                                      + r'(\|_{0,2})'
                                      + end_string_suffix),
          email=re.compile(email_pattern % locals() + '$', re.VERBOSE),
          uri=re.compile(
                (r"""
                %(start_string_prefix)s
                (?P<whole>
                  (?P<absolute> # absolute URI
                    (?P<scheme> # scheme (http, ftp, mailto)
                      [a-zA-Z][a-zA-Z0-9.+-]*
                    )
                    :
                    (
                      ( # either:
                        (//?)? # hierarchical URI
                        %(uric)s* # URI characters
                        %(uri_end)s # final URI char
                      )
                      ( # optional query
                        \?%(uric)s*
                        %(uri_end)s
                      )?
                      ( # optional fragment
                        \#%(uric)s*
                        %(uri_end)s
                      )?
                    )
                  )
                | # *OR*
                  (?P<email> # email address
                    """ + email_pattern + r"""
                  )
                )
                %(end_string_suffix)s
                """) % locals(), re.VERBOSE),
          pep=re.compile(
                r"""
                %(start_string_prefix)s
                (
                  (pep-(?P<pepnum1>\d+)(.txt)?) # reference to source file
                |
                  (PEP\s+(?P<pepnum2>\d+)) # reference by name
                )
                %(end_string_suffix)s""" % locals(), re.VERBOSE),
          rfc=re.compile(
                r"""
                %(start_string_prefix)s
                (RFC(-|\s+)?(?P<rfcnum>\d+))
                %(end_string_suffix)s""" % locals(), re.VERBOSE))
    def quoted_start(self, match):
        """Return 1 if inline markup start-string is 'quoted', 0 if not."""
        string = match.string
        start = match.start()
        end = match.end()
        if start == 0: # start-string at beginning of text
            return 0
        prestart = string[start - 1]
        try:
            poststart = string[end]
            # Quoted when the enclosing open/close pair matches, e.g. "*".
            if self.openers.index(prestart) \
                  == self.closers.index(poststart): # quoted
                return 1
        except IndexError: # start-string at end of text
            return 1
        except ValueError: # not quoted
            pass
        return 0
    def inline_obj(self, match, lineno, end_pattern, nodeclass,
                   restore_backslashes=0):
        """
        Parse a simple delimited inline construct (emphasis, strong,
        literal, target, substitution reference).

        Return a 5-tuple: (text before the construct, list of constructed
        nodes, remaining text, system messages, end-string matched).  If
        the start-string is quoted, or no valid end-string is found, no
        node is constructed (a warning + `problematic` node in the latter
        case).
        """
        string = match.string
        matchstart = match.start('start')
        matchend = match.end('start')
        if self.quoted_start(match):
            return (string[:matchend], [], string[matchend:], [], '')
        endmatch = end_pattern.search(string[matchend:])
        if endmatch and endmatch.start(1): # 1 or more chars
            text = unescape(endmatch.string[:endmatch.start(1)],
                            restore_backslashes)
            textend = matchend + endmatch.end(1)
            rawsource = unescape(string[matchstart:textend], 1)
            return (string[:matchstart], [nodeclass(rawsource, text)],
                    string[textend:], [], endmatch.group(1))
        msg = self.reporter.warning(
              'Inline %s start-string without end-string.'
              % nodeclass.__name__, line=lineno)
        text = unescape(string[matchstart:matchend], 1)
        rawsource = unescape(string[matchstart:matchend], 1)
        prb = self.problematic(text, rawsource, msg)
        return string[:matchstart], [prb], string[matchend:], [msg], ''
    def problematic(self, text, rawsource, message):
        """Return a `problematic` node cross-linked to `message`."""
        msgid = self.document.set_id(message, self.parent)
        problematic = nodes.problematic(rawsource, text, refid=msgid)
        prbid = self.document.set_id(problematic)
        message.add_backref(prbid)
        return problematic
    def emphasis(self, match, lineno):
        """Handle an emphasis ('*') start-string."""
        before, inlines, remaining, sysmessages, endstring = self.inline_obj(
              match, lineno, self.patterns.emphasis, nodes.emphasis)
        return before, inlines, remaining, sysmessages
    def strong(self, match, lineno):
        """Handle a strong ('**') start-string."""
        before, inlines, remaining, sysmessages, endstring = self.inline_obj(
              match, lineno, self.patterns.strong, nodes.strong)
        return before, inlines, remaining, sysmessages
    def interpreted_or_phrase_ref(self, match, lineno):
        """Handle a backquote start-string: interpreted text (with an
        optional prefix or suffix role) or a phrase reference."""
        end_pattern = self.patterns.interpreted_or_phrase_ref
        string = match.string
        matchstart = match.start('backquote')
        matchend = match.end('backquote')
        rolestart = match.start('role')
        role = match.group('role')
        position = ''
        if role:
            role = role[1:-1]           # strip the surrounding colons
            position = 'prefix'
        elif self.quoted_start(match):
            return (string[:matchend], [], string[matchend:], [])
        endmatch = end_pattern.search(string[matchend:])
        if endmatch and endmatch.start(1): # 1 or more chars
            textend = matchend + endmatch.end()
            if endmatch.group('role'):
                if role: # both prefix and suffix roles present
                    msg = self.reporter.warning(
                        'Multiple roles in interpreted text (both '
                        'prefix and suffix present; only one allowed).',
                        line=lineno)
                    text = unescape(string[rolestart:textend], 1)
                    prb = self.problematic(text, text, msg)
                    return string[:rolestart], [prb], string[textend:], [msg]
                role = endmatch.group('suffix')[1:-1]
                position = 'suffix'
            escaped = endmatch.string[:endmatch.start(1)]
            rawsource = unescape(string[matchstart:textend], 1)
            if rawsource[-1:] == '_': # trailing '_': phrase reference
                if role:
                    msg = self.reporter.warning(
                          'Mismatch: both interpreted text role %s and '
                          'reference suffix.' % position, line=lineno)
                    text = unescape(string[rolestart:textend], 1)
                    prb = self.problematic(text, text, msg)
                    return string[:rolestart], [prb], string[textend:], [msg]
                return self.phrase_ref(string[:matchstart], string[textend:],
                                       rawsource, escaped, unescape(escaped))
            else:
                rawsource = unescape(string[rolestart:textend], 1)
                nodelist, messages = self.interpreted(rawsource, escaped, role,
                                                      lineno)
                return (string[:rolestart], nodelist,
                        string[textend:], messages)
        msg = self.reporter.warning(
              'Inline interpreted text or phrase reference start-string '
              'without end-string.', line=lineno)
        text = unescape(string[matchstart:matchend], 1)
        prb = self.problematic(text, text, msg)
        return string[:matchstart], [prb], string[matchend:], [msg]
    def phrase_ref(self, before, after, rawsource, escaped, text):
        """
        Build a `reference` node from a matched phrase reference, plus a
        `target` node if the phrase has an embedded URI
        (``\\`text <uri>\\`_``).
        """
        match = self.patterns.embedded_uri.search(escaped)
        if match:
            text = unescape(escaped[:match.start(0)])
            uri_text = match.group(2)
            uri = ''.join(uri_text.split()) # remove whitespace from the URI
            uri = self.adjust_uri(uri)
            if uri:
                target = nodes.target(match.group(1), refuri=uri)
            else:
                raise ApplicationError('problem with URI: %r' % uri_text)
            if not text:
                text = uri # reference text defaults to the URI itself
        else:
            target = None
        refname = normalize_name(text)
        reference = nodes.reference(rawsource, text,
                                    name=whitespace_normalize_name(text))
        node_list = [reference]
        if rawsource[-2:] == '__': # anonymous reference
            if target:
                reference['refuri'] = uri
            else:
                reference['anonymous'] = 1
        else:
            if target:
                reference['refuri'] = uri
                target['names'].append(refname)
                self.document.note_explicit_target(target, self.parent)
                node_list.append(target)
            else:
                reference['refname'] = refname
                self.document.note_refname(reference)
        return before, node_list, after, []
    def adjust_uri(self, uri):
        """Return `uri`, prefixed with 'mailto:' if it is an email address."""
        match = self.patterns.email.match(uri)
        if match:
            return 'mailto:' + uri
        else:
            return uri
    def interpreted(self, rawsource, text, role, lineno):
        """Process interpreted text through its role function; return a
        (nodes, system_messages) tuple."""
        role_fn, messages = roles.role(role, self.language, lineno,
                                       self.reporter)
        if role_fn:
            # NOTE: the local name `nodes` shadows the global `nodes`
            # module for the rest of this branch.
            nodes, messages2 = role_fn(role, rawsource, text, lineno, self)
            return nodes, messages + messages2
        else:
            msg = self.reporter.error(
                'Unknown interpreted text role "%s".' % role,
                line=lineno)
            return ([self.problematic(rawsource, rawsource, msg)],
                    messages + [msg])
    def literal(self, match, lineno):
        """Handle an inline literal ('``') start-string."""
        before, inlines, remaining, sysmessages, endstring = self.inline_obj(
              match, lineno, self.patterns.literal, nodes.literal,
              restore_backslashes=1)
        return before, inlines, remaining, sysmessages
    def inline_internal_target(self, match, lineno):
        """Handle an inline internal target ('_`') start-string."""
        before, inlines, remaining, sysmessages, endstring = self.inline_obj(
              match, lineno, self.patterns.target, nodes.target)
        if inlines and isinstance(inlines[0], nodes.target):
            assert len(inlines) == 1
            target = inlines[0]
            name = normalize_name(target.astext())
            target['names'].append(name)
            self.document.note_explicit_target(target, self.parent)
        return before, inlines, remaining, sysmessages
    def substitution_reference(self, match, lineno):
        """Handle a substitution reference ('|') start-string; a trailing
        '_' or '__' also makes it a (named/anonymous) reference."""
        before, inlines, remaining, sysmessages, endstring = self.inline_obj(
              match, lineno, self.patterns.substitution_ref,
              nodes.substitution_reference)
        if len(inlines) == 1:
            subref_node = inlines[0]
            if isinstance(subref_node, nodes.substitution_reference):
                subref_text = subref_node.astext()
                self.document.note_substitution_ref(subref_node, subref_text)
                if endstring[-1:] == '_':
                    reference_node = nodes.reference(
                        '|%s%s' % (subref_text, endstring), '')
                    if endstring[-2:] == '__':
                        reference_node['anonymous'] = 1
                    else:
                        reference_node['refname'] = normalize_name(subref_text)
                        self.document.note_refname(reference_node)
                    reference_node += subref_node
                    inlines = [reference_node]
        return before, inlines, remaining, sysmessages
    def footnote_reference(self, match, lineno):
        """
        Handles `nodes.footnote_reference` and `nodes.citation_reference`
        elements.
        """
        label = match.group('footnotelabel')
        refname = normalize_name(label)
        string = match.string
        before = string[:match.start('whole')]
        remaining = string[match.end('whole'):]
        if match.group('citationlabel'):
            refnode = nodes.citation_reference('[%s]_' % label,
                                               refname=refname)
            refnode += nodes.Text(label)
            self.document.note_citation_ref(refnode)
        else:
            refnode = nodes.footnote_reference('[%s]_' % label)
            if refname[0] == '#': # auto-numbered (possibly labeled)
                refname = refname[1:]
                refnode['auto'] = 1
                self.document.note_autofootnote_ref(refnode)
            elif refname == '*': # auto-symbol
                refname = ''
                refnode['auto'] = '*'
                self.document.note_symbol_footnote_ref(
                      refnode)
            else: # manually numbered
                refnode += nodes.Text(label)
            if refname:
                refnode['refname'] = refname
                self.document.note_footnote_ref(refnode)
            if utils.get_trim_footnote_ref_space(self.document.settings):
                before = before.rstrip()
        return (before, [refnode], remaining, [])
    def reference(self, match, lineno, anonymous=None):
        """Handle a simple reference (name followed by '_' or '__')."""
        referencename = match.group('refname')
        refname = normalize_name(referencename)
        referencenode = nodes.reference(
            referencename + match.group('refend'), referencename,
            name=whitespace_normalize_name(referencename))
        if anonymous:
            referencenode['anonymous'] = 1
        else:
            referencenode['refname'] = refname
            self.document.note_refname(referencenode)
        string = match.string
        matchstart = match.start('whole')
        matchend = match.end('whole')
        return (string[:matchstart], [referencenode], string[matchend:], [])
    def anonymous_reference(self, match, lineno):
        """Handle an anonymous simple reference (name followed by '__')."""
        return self.reference(match, lineno, anonymous=1)
    def standalone_uri(self, match, lineno):
        """Turn a standalone URI or email address into a `reference` node;
        raise `MarkupMismatch` for unknown URI schemes."""
        if (not match.group('scheme')
            or match.group('scheme').lower() in urischemes.schemes):
            if match.group('email'):
                addscheme = 'mailto:'
            else:
                addscheme = ''
            text = match.group('whole')
            unescaped = unescape(text, 0)
            return [nodes.reference(unescape(text, 1), unescaped,
                                    refuri=addscheme + unescaped)]
        else: # not a valid scheme
            raise MarkupMismatch
    def pep_reference(self, match, lineno):
        """Turn a PEP reference ("PEP 287" or "pep-0287") into a hyperlink
        built from the ``pep_base_url``/``pep_file_url_template`` settings."""
        text = match.group(0)
        if text.startswith('pep-'):
            pepnum = int(match.group('pepnum1'))
        elif text.startswith('PEP'):
            pepnum = int(match.group('pepnum2'))
        else:
            raise MarkupMismatch
        ref = (self.document.settings.pep_base_url
               + self.document.settings.pep_file_url_template % pepnum)
        unescaped = unescape(text, 0)
        return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
    # Filename template appended to ``rfc_base_url``:
    rfc_url = 'rfc%d.html'
    def rfc_reference(self, match, lineno):
        """Turn an RFC reference ("RFC 2822") into a hyperlink built from
        the ``rfc_base_url`` setting and `self.rfc_url`."""
        text = match.group(0)
        if text.startswith('RFC'):
            rfcnum = int(match.group('rfcnum'))
            ref = self.document.settings.rfc_base_url + self.rfc_url % rfcnum
        else:
            raise MarkupMismatch
        unescaped = unescape(text, 0)
        return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
    def implicit_inline(self, text, lineno):
        """
        Check each of the patterns in `self.implicit_dispatch` for a match,
        and dispatch to the stored method for the pattern. Recursively check
        the text before and after the match. Return a list of `nodes.Text`
        and inline element nodes.
        """
        if not text:
            return []
        for pattern, method in self.implicit_dispatch:
            match = pattern.search(text)
            if match:
                try:
                    # Must recurse on strings before *and* after the match;
                    # there may be multiple patterns.
                    return (self.implicit_inline(text[:match.start()], lineno)
                            + method(match, lineno) +
                            self.implicit_inline(text[match.end():], lineno))
                except MarkupMismatch:
                    pass
        return [nodes.Text(unescape(text), rawsource=unescape(text, 1))]
    # Maps the start-string (or whole-construct end-string) matched by
    # `patterns.initial` to its handler method; used in `parse`.
    dispatch = {'*': emphasis,
                '**': strong,
                '`': interpreted_or_phrase_ref,
                '``': literal,
                '_`': inline_internal_target,
                ']_': footnote_reference,
                '|': substitution_reference,
                '_': reference,
                '__': anonymous_reference}
def _loweralpha_to_int(s, _zero=(ord('a')-1)):
return ord(s) - _zero
def _upperalpha_to_int(s, _zero=(ord('A')-1)):
return ord(s) - _zero
def _lowerroman_to_int(s):
    """Return the integer value of lowercase Roman numeral `s`."""
    normalized = s.upper()
    return roman.fromRoman(normalized)
class Body(RSTState):
    """
    Generic classifier of the first line of a block.
    """
    double_width_pad_char = tableparser.TableParser.double_width_pad_char
    """Padding character for East Asian double-width text."""
    enum = Struct()
    """Enumerated list parsing information."""
    # Enumerator decoration styles: '(1)', '1)', '1.'.  `start`/`end` are
    # slice indexes that isolate the enumerator text from its decoration.
    enum.formatinfo = {
          'parens': Struct(prefix='(', suffix=')', start=1, end=-1),
          'rparen': Struct(prefix='', suffix=')', start=0, end=-1),
          'period': Struct(prefix='', suffix='.', start=0, end=-1)}
    enum.formats = enum.formatinfo.keys()
    enum.sequences = ['arabic', 'loweralpha', 'upperalpha',
                      'lowerroman', 'upperroman'] # ORDERED!
    # Regexp fragment matching one enumerator of each sequence type:
    enum.sequencepats = {'arabic': '[0-9]+',
                         'loweralpha': '[a-z]',
                         'upperalpha': '[A-Z]',
                         'lowerroman': '[ivxlcdm]+',
                         'upperroman': '[IVXLCDM]+',}
    # Functions converting enumerator text to its ordinal value:
    enum.converters = {'arabic': int,
                       'loweralpha': _loweralpha_to_int,
                       'upperalpha': _upperalpha_to_int,
                       'lowerroman': _lowerroman_to_int,
                       'upperroman': roman.fromRoman}
    enum.sequenceregexps = {}
    for sequence in enum.sequences:
        enum.sequenceregexps[sequence] = re.compile(
              enum.sequencepats[sequence] + '$')
    grid_table_top_pat = re.compile(r'\+-[-+]+-\+ *$')
    """Matches the top (& bottom) of a full table)."""
    simple_table_top_pat = re.compile('=+( +=+)+ *$')
    """Matches the top of a simple table."""
    simple_table_border_pat = re.compile('=+[ =]*$')
    """Matches the bottom & header bottom of a simple table."""
    pats = {}
    """Fragments of patterns used by transitions."""
    pats['nonalphanum7bit'] = '[!-/:-@[-`{-~]'
    pats['alpha'] = '[a-zA-Z]'
    pats['alphanum'] = '[a-zA-Z0-9]'
    pats['alphanumplus'] = '[a-zA-Z0-9_-]'
    pats['enum'] = ('(%(arabic)s|%(loweralpha)s|%(upperalpha)s|%(lowerroman)s'
                    '|%(upperroman)s|#)' % enum.sequencepats)
    pats['optname'] = '%(alphanum)s%(alphanumplus)s*' % pats
    # @@@ Loosen up the pattern? Allow Unicode?
    pats['optarg'] = '(%(alpha)s%(alphanumplus)s*|<[^<>]+>)' % pats
    pats['shortopt'] = r'(-|\+)%(alphanum)s( ?%(optarg)s)?' % pats
    pats['longopt'] = r'(--|/)%(optname)s([ =]%(optarg)s)?' % pats
    pats['option'] = r'(%(shortopt)s|%(longopt)s)' % pats
    # One named group per enumerator format ('parens', 'rparen', 'period'):
    for format in enum.formats:
        pats[format] = '(?P<%s>%s%s%s)' % (
              format, re.escape(enum.formatinfo[format].prefix),
              pats['enum'], re.escape(enum.formatinfo[format].suffix))
    # Transition patterns, keyed by transition name (see
    # `initial_transitions` below for the order they are tried in):
    patterns = {
          'bullet': u'[-+*\u2022\u2023\u2043]( +|$)',
          'enumerator': r'(%(parens)s|%(rparen)s|%(period)s)( +|$)' % pats,
          'field_marker': r':(?![: ])([^:\\]|\\.)*(?<! ):( +|$)',
          'option_marker': r'%(option)s(, %(option)s)*( +| ?$)' % pats,
          'doctest': r'>>>( +|$)',
          'line_block': r'\|( +|$)',
          'grid_table_top': grid_table_top_pat,
          'simple_table_top': simple_table_top_pat,
          'explicit_markup': r'\.\.( +|$)',
          'anonymous': r'__( +|$)',
          'line': r'(%(nonalphanum7bit)s)\1* *$' % pats,
          'text': r''}
    initial_transitions = (
          'bullet',
          'enumerator',
          'field_marker',
          'option_marker',
          'doctest',
          'line_block',
          'grid_table_top',
          'simple_table_top',
          'explicit_markup',
          'anonymous',
          'line',
          'text')
    def indent(self, match, context, next_state):
        """Block quote."""
        # Unexpected indentation with no preceding marker is a block quote.
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_indented()
        elements = self.block_quote(indented, line_offset)
        self.parent += elements
        if not blank_finish:
            self.parent += self.unindent_warning('Block quote')
        return context, next_state, []
    def block_quote(self, indented, line_offset):
        """Parse `indented` lines as a sequence of block quotes, each with
        an optional attribution; return a list of nodes and messages."""
        elements = []
        while indented:
            (blockquote_lines,
             attribution_lines,
             attribution_offset,
             indented,
             new_line_offset) = self.split_attribution(indented, line_offset)
            blockquote = nodes.block_quote()
            self.nested_parse(blockquote_lines, line_offset, blockquote)
            elements.append(blockquote)
            if attribution_lines:
                attribution, messages = self.parse_attribution(
                    attribution_lines, attribution_offset)
                blockquote += attribution
                elements += messages
            line_offset = new_line_offset
            # Skip blank lines before a possible following block quote.
            while indented and not indented[0]:
                indented = indented[1:]
                line_offset += 1
        return elements
    # U+2014 is an em-dash:
    attribution_pattern = re.compile(u'(---?(?!-)|\u2014) *(?=[^ \\n])')
    def split_attribution(self, indented, line_offset):
        """
        Check for a block quote attribution and split it off:

        * First line after a blank line must begin with a dash ("--", "---",
          em-dash; matches `self.attribution_pattern`).
        * Every line after that must have consistent indentation.
        * Attributions must be preceded by block quote content.

        Return a tuple of: (block quote content lines, attribution lines,
        attribution offset, remaining indented lines, offset of remaining
        lines).  The last four elements are ``None`` if no attribution is
        found.
        """
        blank = None
        nonblank_seen = False
        for i in range(len(indented)):
            line = indented[i].rstrip()
            if line:
                if nonblank_seen and blank == i - 1: # last line blank
                    match = self.attribution_pattern.match(line)
                    if match:
                        attribution_end, indent = self.check_attribution(
                            indented, i)
                        if attribution_end:
                            a_lines = indented[i:attribution_end]
                            # Strip the dash from the first line and the
                            # common indent from continuation lines.
                            a_lines.trim_left(match.end(), end=1)
                            a_lines.trim_left(indent, start=1)
                            return (indented[:i], a_lines,
                                    i, indented[attribution_end:],
                                    line_offset + attribution_end)
                nonblank_seen = True
            else:
                blank = i
        else:
            # No attribution found: everything is block quote content.
            return (indented, None, None, None, None)
    def check_attribution(self, indented, attribution_start):
        """
        Check attribution shape.
        Return the index past the end of the attribution, and the indent.
        Return ``(None, None)`` if continuation lines have inconsistent
        indentation (not an attribution after all).
        """
        indent = None
        i = attribution_start + 1
        for i in range(attribution_start + 1, len(indented)):
            line = indented[i].rstrip()
            if not line:
                break
            if indent is None:
                indent = len(line) - len(line.lstrip())
            elif len(line) - len(line.lstrip()) != indent:
                return None, None # bad shape; not an attribution
        else:
            # return index of line after last attribution line:
            i += 1
        return i, (indent or 0)
    def parse_attribution(self, indented, line_offset):
        """Parse attribution text for inline markup; return an
        `attribution` node and any system messages."""
        text = '\n'.join(indented).rstrip()
        lineno = self.state_machine.abs_line_number() + line_offset
        textnodes, messages = self.inline_text(text, lineno)
        node = nodes.attribution(text, '', *textnodes)
        node.line = lineno
        # report with source and source-line results in
        # ``IndexError: list index out of range``
        # node.source, node.line = self.state_machine.get_source_and_line(lineno)
        return node, messages
    def bullet(self, match, context, next_state):
        """Bullet list item."""
        bulletlist = nodes.bullet_list()
        self.parent += bulletlist
        bulletlist['bullet'] = match.string[0]
        i, blank_finish = self.list_item(match.end())
        bulletlist += i
        # Parse any further items of the same list in a nested pass.
        offset = self.state_machine.line_offset + 1 # next line
        new_line_offset, blank_finish = self.nested_list_parse(
              self.state_machine.input_lines[offset:],
              input_offset=self.state_machine.abs_line_offset() + 1,
              node=bulletlist, initial_state='BulletList',
              blank_finish=blank_finish)
        self.goto_line(new_line_offset)
        if not blank_finish:
            self.parent += self.unindent_warning('Bullet list')
        return [], next_state, []
    def list_item(self, indent):
        """Parse one list item's content (indented past the marker of
        width `indent`); return a `list_item` node and blank_finish flag."""
        if self.state_machine.line[indent:]:
            # Content on the same line as the marker: indent is known.
            indented, line_offset, blank_finish = (
                self.state_machine.get_known_indented(indent))
        else:
            # Marker alone on its line: measure indent from the next line.
            indented, indent, line_offset, blank_finish = (
                self.state_machine.get_first_known_indented(indent))
        listitem = nodes.list_item('\n'.join(indented))
        if indented:
            self.nested_parse(indented, input_offset=line_offset,
                              node=listitem)
        return listitem, blank_finish
    def enumerator(self, match, context, next_state):
        """Enumerated List Item"""
        format, sequence, text, ordinal = self.parse_enumerator(match)
        if not self.is_enumerated_list_item(ordinal, sequence, format):
            # Not really a list item; reprocess this line as plain text.
            raise statemachine.TransitionCorrection('text')
        enumlist = nodes.enumerated_list()
        self.parent += enumlist
        if sequence == '#':
            # Auto-enumerated lists are rendered as arabic.
            enumlist['enumtype'] = 'arabic'
        else:
            enumlist['enumtype'] = sequence
        enumlist['prefix'] = self.enum.formatinfo[format].prefix
        enumlist['suffix'] = self.enum.formatinfo[format].suffix
        if ordinal != 1:
            enumlist['start'] = ordinal
            src, srcline = self.state_machine.get_source_and_line()
            msg = self.reporter.info(
                'Enumerated list start value not ordinal-1: "%s" (ordinal %s)'
                % (text, ordinal), source=src, line=srcline)
            self.parent += msg
        listitem, blank_finish = self.list_item(match.end())
        enumlist += listitem
        # Parse any further items of the same list in a nested pass.
        offset = self.state_machine.line_offset + 1 # next line
        newline_offset, blank_finish = self.nested_list_parse(
              self.state_machine.input_lines[offset:],
              input_offset=self.state_machine.abs_line_offset() + 1,
              node=enumlist, initial_state='EnumeratedList',
              blank_finish=blank_finish,
              extra_settings={'lastordinal': ordinal,
                              'format': format,
                              'auto': sequence == '#'})
        self.goto_line(newline_offset)
        if not blank_finish:
            self.parent += self.unindent_warning('Enumerated list')
        return [], next_state, []
    def parse_enumerator(self, match, expected_sequence=None):
        """
        Analyze an enumerator and return the results.

        :Return:
            - the enumerator format ('period', 'parens', or 'rparen'),
            - the sequence used ('arabic', 'loweralpha', 'upperroman', etc.),
            - the text of the enumerator, stripped of formatting, and
            - the ordinal value of the enumerator ('a' -> 1, 'ii' -> 2, etc.;
              ``None`` is returned for invalid enumerator text).

        The enumerator format has already been determined by the regular
        expression match. If `expected_sequence` is given, that sequence is
        tried first. If not, we check for Roman numeral 1. This way,
        single-character Roman numerals (which are also alphabetical) can be
        matched. If no sequence has been matched, all sequences are checked in
        order.
        """
        groupdict = match.groupdict()
        sequence = ''
        # The matched named group identifies the format (see `pats`).
        for format in self.enum.formats:
            if groupdict[format]: # was this the format matched?
                break # yes; keep `format`
        else: # shouldn't happen
            raise ParserError('enumerator format not matched')
        # Slice off the format's decoration ('(', ')', '.') to get the text.
        text = groupdict[format][self.enum.formatinfo[format].start
                                 :self.enum.formatinfo[format].end]
        if text == '#':
            sequence = '#'
        elif expected_sequence:
            try:
                if self.enum.sequenceregexps[expected_sequence].match(text):
                    sequence = expected_sequence
            except KeyError: # shouldn't happen
                raise ParserError('unknown enumerator sequence: %s'
                                  % sequence)
        elif text == 'i':
            sequence = 'lowerroman'
        elif text == 'I':
            sequence = 'upperroman'
        if not sequence:
            # First matching sequence wins (self.enum.sequences is ordered).
            for sequence in self.enum.sequences:
                if self.enum.sequenceregexps[sequence].match(text):
                    break
            else: # shouldn't happen
                raise ParserError('enumerator sequence not matched')
        if sequence == '#':
            ordinal = 1
        else:
            try:
                ordinal = self.enum.converters[sequence](text)
            except roman.InvalidRomanNumeralError:
                ordinal = None
        return format, sequence, text, ordinal
    def is_enumerated_list_item(self, ordinal, sequence, format):
        """
        Check validity based on the ordinal value and the second line.

        Return true if the ordinal is valid and the second line is blank,
        indented, or starts with the next enumerator or an auto-enumerator.
        """
        if ordinal is None:
            return None
        try:
            next_line = self.state_machine.next_line()
        except EOFError: # end of input lines
            self.state_machine.previous_line()
            return 1
        else:
            # Peek only: restore the state machine's position.
            self.state_machine.previous_line()
        if not next_line[:1].strip(): # blank or indented
            return 1
        result = self.make_enumerator(ordinal + 1, sequence, format)
        if result:
            next_enumerator, auto_enumerator = result
            try:
                if ( next_line.startswith(next_enumerator) or
                     next_line.startswith(auto_enumerator) ):
                    return 1
            except TypeError:
                pass
        return None
def make_enumerator(self, ordinal, sequence, format):
"""
Construct and return the next enumerated list item marker, and an
auto-enumerator ("#" instead of the regular enumerator).
Return ``None`` for invalid (out of range) ordinals.
""" #"
if sequence == '#':
enumerator = '#'
elif sequence == 'arabic':
enumerator = str(ordinal)
else:
if sequence.endswith('alpha'):
if ordinal > 26:
return None
enumerator = chr(ordinal + ord('a') - 1)
elif sequence.endswith('roman'):
try:
enumerator = roman.toRoman(ordinal)
except roman.RomanError:
return None
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
if sequence.startswith('lower'):
enumerator = enumerator.lower()
elif sequence.startswith('upper'):
enumerator = enumerator.upper()
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
formatinfo = self.enum.formatinfo[format]
next_enumerator = (formatinfo.prefix + enumerator + formatinfo.suffix
+ ' ')
auto_enumerator = formatinfo.prefix + '#' + formatinfo.suffix + ' '
return next_enumerator, auto_enumerator
    def field_marker(self, match, context, next_state):
        """Field list item."""
        field_list = nodes.field_list()
        self.parent += field_list
        field, blank_finish = self.field(match)
        field_list += field
        # Parse any further fields of the same list in a nested pass.
        offset = self.state_machine.line_offset + 1 # next line
        newline_offset, blank_finish = self.nested_list_parse(
              self.state_machine.input_lines[offset:],
              input_offset=self.state_machine.abs_line_offset() + 1,
              node=field_list, initial_state='FieldList',
              blank_finish=blank_finish)
        self.goto_line(newline_offset)
        if not blank_finish:
            self.parent += self.unindent_warning('Field list')
        return [], next_state, []
    def field(self, match):
        """Parse one field (name + body) from a field marker match;
        return a `field` node and blank_finish flag."""
        name = self.parse_field_marker(match)
        src, srcline = self.state_machine.get_source_and_line()
        lineno = self.state_machine.abs_line_number()
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end())
        field_node = nodes.field()
        field_node.source = src
        field_node.line = srcline
        name_nodes, name_messages = self.inline_text(name, lineno)
        field_node += nodes.field_name(name, '', *name_nodes)
        field_body = nodes.field_body('\n'.join(indented), *name_messages)
        field_node += field_body
        if indented:
            self.parse_field_body(indented, line_offset, field_body)
        return field_node, blank_finish
def parse_field_marker(self, match):
"""Extract & return field name from a field marker match."""
field = match.group()[1:] # strip off leading ':'
field = field[:field.rfind(':')] # strip off trailing ':' etc.
return field
    def parse_field_body(self, indented, offset, node):
        """Parse the indented field body into `node` (a `field_body`)."""
        self.nested_parse(indented, input_offset=offset, node=node)
    def option_marker(self, match, context, next_state):
        """Option list item.

        Parse the first option-list item; on a marker that cannot be
        parsed, fall back to rendering the indented text as a block
        quote.  Otherwise consume subsequent items with a nested state
        machine (initial state 'OptionList').
        """
        optionlist = nodes.option_list()
        try:
            listitem, blank_finish = self.option_list_item(match)
        except MarkupError, error:
            # This shouldn't happen; pattern won't match.
            src, srcline = self.state_machine.get_source_and_line()
            msg = self.reporter.error(u'Invalid option list marker: %s' %
                                      error, source=src, line=srcline)
            self.parent += msg
            indented, indent, line_offset, blank_finish = \
                  self.state_machine.get_first_known_indented(match.end())
            elements = self.block_quote(indented, line_offset)
            self.parent += elements
            if not blank_finish:
                self.parent += self.unindent_warning('Option list')
            return [], next_state, []
        self.parent += optionlist
        optionlist += listitem
        offset = self.state_machine.line_offset + 1   # next line
        newline_offset, blank_finish = self.nested_list_parse(
              self.state_machine.input_lines[offset:],
              input_offset=self.state_machine.abs_line_offset() + 1,
              node=optionlist, initial_state='OptionList',
              blank_finish=blank_finish)
        self.goto_line(newline_offset)
        if not blank_finish:
            self.parent += self.unindent_warning('Option list')
        return [], next_state, []
    def option_list_item(self, match):
        """Parse one option list item; return (item node, blank_finish).

        If no description is indented under the option marker, this is
        not an option list item at all: rewind and re-dispatch the line
        as ordinary text via `TransitionCorrection`.
        """
        offset = self.state_machine.abs_line_offset()
        options = self.parse_option_marker(match)
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end())
        if not indented:                    # not an option list item
            self.goto_line(offset)
            raise statemachine.TransitionCorrection('text')
        option_group = nodes.option_group('', *options)
        description = nodes.description('\n'.join(indented))
        option_list_item = nodes.option_list_item('', option_group,
                                                  description)
        if indented:
            self.nested_parse(indented, input_offset=line_offset,
                              node=description)
        return option_list_item, blank_finish
    def parse_option_marker(self, match):
        """
        Return a list of `node.option` and `node.option_argument` objects,
        parsed from an option marker match.

        Recognized spellings per option string (options are separated by
        ', ' in the marker):

        - ``--opt=value`` (delimiter '=')
        - ``-ovalue`` / ``+ovalue`` (short option, no delimiter)
        - ``-o <value1 value2>`` (angle-bracketed argument, may contain
          spaces; joined into one token)

        :Exception: `MarkupError` for invalid option markers.
        """
        optlist = []
        optionstrings = match.group().rstrip().split(', ')
        for optionstring in optionstrings:
            tokens = optionstring.split()
            delimiter = ' '
            firstopt = tokens[0].split('=')
            if len(firstopt) > 1:
                # "--opt=value" form
                tokens[:1] = firstopt
                delimiter = '='
            elif (len(tokens[0]) > 2
                  and ((tokens[0].startswith('-')
                        and not tokens[0].startswith('--'))
                       or tokens[0].startswith('+'))):
                # "-ovalue" form
                tokens[:1] = [tokens[0][:2], tokens[0][2:]]
                delimiter = ''
            if len(tokens) > 1 and (tokens[1].startswith('<')
                                    and tokens[-1].endswith('>')):
                # "-o <value1 value2>" form; join all values into one token
                tokens[1:] = [' '.join(tokens[1:])]
            if 0 < len(tokens) <= 2:
                # Token 0 is the option string; optional token 1 is its
                # argument.
                option = nodes.option(optionstring)
                option += nodes.option_string(tokens[0], tokens[0])
                if len(tokens) > 1:
                    option += nodes.option_argument(tokens[1], tokens[1],
                                                    delimiter=delimiter)
                optlist.append(option)
            else:
                raise MarkupError(
                    'wrong number of option tokens (=%s), should be 1 or 2: '
                    '"%s"' % (len(tokens), optionstring))
        return optlist
    def doctest(self, match, context, next_state):
        """Doctest block: consume the contiguous text block verbatim."""
        data = '\n'.join(self.state_machine.get_text_block())
        self.parent += nodes.doctest_block(data, data)
        return [], next_state, []
    def line_block(self, match, context, next_state):
        """First line of a line block.

        Parse the first line, then consume subsequent '|'-prefixed lines
        via a nested state machine (initial state 'LineBlock').  Finally
        group the collected lines into nested line_block nodes according
        to their indents.
        """
        block = nodes.line_block()
        self.parent += block
        lineno = self.state_machine.abs_line_number()
        line, messages, blank_finish = self.line_block_line(match, lineno)
        block += line
        self.parent += messages
        if not blank_finish:
            offset = self.state_machine.line_offset + 1   # next line
            new_line_offset, blank_finish = self.nested_list_parse(
                  self.state_machine.input_lines[offset:],
                  input_offset=self.state_machine.abs_line_offset() + 1,
                  node=block, initial_state='LineBlock',
                  blank_finish=0)
            self.goto_line(new_line_offset)
        if not blank_finish:
            src, srcline = self.state_machine.get_source_and_line()
            self.parent += self.reporter.warning(
                'Line block ends without a blank line.',
                source=src, line=srcline+1)
        if len(block):
            # First line's indent anchors the nesting computation.
            if block[0].indent is None:
                block[0].indent = 0
            self.nest_line_block_lines(block)
        return [], next_state, []
    def line_block_line(self, match, lineno):
        """Return one line element of a line_block.

        ``line.indent`` is only set for non-empty lines (derived from the
        whitespace after the '|' marker); empty '|' lines leave it None,
        to be filled in later by `nest_line_block_lines`.
        """
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end(),
                                                          until_blank=1)
        text = u'\n'.join(indented)
        text_nodes, messages = self.inline_text(text, lineno)
        line = nodes.line(text, '', *text_nodes)
        if match.string.rstrip() != '|': # not empty
            line.indent = len(match.group(1)) - 1
        return line, messages, blank_finish
def nest_line_block_lines(self, block):
for index in range(1, len(block)):
if block[index].indent is None:
block[index].indent = block[index - 1].indent
self.nest_line_block_segment(block)
    def nest_line_block_segment(self, block):
        """Recursively group more-indented runs of lines into sub-blocks.

        Lines at the minimum indent stay at this level; each maximal run
        of deeper-indented lines is wrapped in a child line_block and
        nested recursively.  ``block``'s children are replaced in place.
        """
        indents = [item.indent for item in block]
        least = min(indents)
        new_items = []
        new_block = nodes.line_block()
        for item in block:
            if item.indent > least:
                # Accumulate a run of deeper-indented lines.
                new_block.append(item)
            else:
                if len(new_block):
                    self.nest_line_block_segment(new_block)
                    new_items.append(new_block)
                    new_block = nodes.line_block()
                new_items.append(item)
        if len(new_block):
            # Flush a trailing deeper-indented run.
            self.nest_line_block_segment(new_block)
            new_items.append(new_block)
        block[:] = new_items
    def grid_table_top(self, match, context, next_state):
        """Top border of a full table."""
        # Delegate to the generic handler with grid-table specifics.
        return self.table_top(match, context, next_state,
                              self.isolate_grid_table,
                              tableparser.GridTableParser)
    def simple_table_top(self, match, context, next_state):
        """Top border of a simple table."""
        # Delegate to the generic handler with simple-table specifics.
        return self.table_top(match, context, next_state,
                              self.isolate_simple_table,
                              tableparser.SimpleTableParser)
    def table_top(self, match, context, next_state,
                  isolate_function, parser_class):
        """Top border of a generic table.

        Parse the table using the supplied isolation function and parser
        class, append the result, and warn if no blank line follows.
        """
        nodelist, blank_finish = self.table(isolate_function, parser_class)
        self.parent += nodelist
        if not blank_finish:
            src, srcline = self.state_machine.get_source_and_line()
            msg = self.reporter.warning(
                'Blank line required after table.',
                source=src, line=srcline+1)
            self.parent += msg
        return [], next_state, []
    def table(self, isolate_function, parser_class):
        """Parse a table.

        Isolate the table's text block, run it through the parser, and
        build the document-tree table.  Markup errors produce a
        "Malformed table" system message instead of a table node.
        """
        block, messages, blank_finish = isolate_function()
        if block:
            try:
                parser = parser_class()
                tabledata = parser.parse(block)
                # Absolute line number of the table's first line.
                tableline = (self.state_machine.abs_line_number() - len(block)
                             + 1)
                table = self.build_table(tabledata, tableline)
                nodelist = [table] + messages
            except tableparser.TableMarkupError, detail:
                nodelist = self.malformed_table(
                    block, ' '.join(detail.args)) + messages
        else:
            nodelist = messages
        return nodelist, blank_finish
    def isolate_grid_table(self):
        """Extract the text block of a grid table.

        Returns (block, messages, blank_finish); ``block`` is empty if
        the table is malformed.  Checks the left edge ('+' or '|'), the
        bottom border, and a uniform right edge; on a short table, backs
        the state machine up to just past the last good line.
        """
        messages = []
        blank_finish = 1
        try:
            block = self.state_machine.get_text_block(flush_left=1)
        except statemachine.UnexpectedIndentationError, instance:
            block, src, srcline = instance.args
            messages.append(self.reporter.error('Unexpected indentation.',
                                                source=src, line=srcline))
            blank_finish = 0
        block.disconnect()
        # for East Asian chars:
        block.pad_double_width(self.double_width_pad_char)
        width = len(block[0].strip())
        for i in range(len(block)):
            block[i] = block[i].strip()
            if block[i][0] not in '+|': # check left edge
                blank_finish = 0
                self.state_machine.previous_line(len(block) - i)
                del block[i:]
                break
        if not self.grid_table_top_pat.match(block[-1]): # find bottom
            blank_finish = 0
            # from second-last to third line of table:
            for i in range(len(block) - 2, 1, -1):
                if self.grid_table_top_pat.match(block[i]):
                    # Truncate to the last full border line found.
                    self.state_machine.previous_line(len(block) - i + 1)
                    del block[i+1:]
                    break
            else:
                messages.extend(self.malformed_table(block))
                return [], messages, blank_finish
        for i in range(len(block)): # check right edge
            if len(block[i]) != width or block[i][-1] not in '+|':
                messages.extend(self.malformed_table(block))
                return [], messages, blank_finish
        return block, messages, blank_finish
    def isolate_simple_table(self):
        """Extract the text block of a simple table.

        Scans forward for border lines matching the top border's width;
        the table ends at the second border, at end of input, or at a
        border followed by a blank line.  Returns (block, messages,
        blank_finish); ``block`` is empty if the table is malformed.
        """
        start = self.state_machine.line_offset
        lines = self.state_machine.input_lines
        limit = len(lines) - 1
        toplen = len(lines[start].strip())
        pattern_match = self.simple_table_border_pat.match
        found = 0
        found_at = None
        i = start + 1
        while i <= limit:
            line = lines[i]
            match = pattern_match(line)
            if match:
                if len(line.strip()) != toplen:
                    # Border widths must all match the top border.
                    self.state_machine.next_line(i - start)
                    messages = self.malformed_table(
                        lines[start:i+1], 'Bottom/header table border does '
                        'not match top border.')
                    return [], messages, i == limit or not lines[i+1].strip()
                found += 1
                found_at = i
                if found == 2 or i == limit or not lines[i+1].strip():
                    end = i
                    break
            i += 1
        else: # reached end of input_lines
            if found:
                extra = ' or no blank line after table bottom'
                self.state_machine.next_line(found_at - start)
                block = lines[start:found_at+1]
            else:
                extra = ''
                self.state_machine.next_line(i - start - 1)
                block = lines[start:]
            messages = self.malformed_table(
                block, 'No bottom table border found%s.' % extra)
            return [], messages, not extra
        self.state_machine.next_line(end - start)
        block = lines[start:end+1]
        # for East Asian chars:
        block.pad_double_width(self.double_width_pad_char)
        return block, [], end == limit or not lines[end+1].strip()
    def malformed_table(self, block, detail=''):
        """Return a "Malformed table" error message wrapping the raw block.

        Undoes the double-width padding first so the literal block shows
        the original source text.
        """
        block.replace(self.double_width_pad_char, '')
        data = '\n'.join(block)
        message = 'Malformed table.'
        # Line number of the table's first line.
        startline = self.state_machine.abs_line_number() - len(block) + 1
        src, srcline = self.state_machine.get_source_and_line(startline)
        if detail:
            message += '\n' + detail
        error = self.reporter.error(message, nodes.literal_block(data, data),
                                    source=src, line=srcline)
        return [error]
    def build_table(self, tabledata, tableline, stub_columns=0):
        """Build a document-tree ``table`` from parsed table data.

        :Parameters:
            - `tabledata`: (colwidths, headrows, bodyrows) as produced by
              a table parser.
            - `tableline`: absolute line number of the table's first line
              (used for nested-parse error reporting in cells).
            - `stub_columns`: number of leading columns to mark as stubs.
        """
        colwidths, headrows, bodyrows = tabledata
        table = nodes.table()
        tgroup = nodes.tgroup(cols=len(colwidths))
        table += tgroup
        for colwidth in colwidths:
            colspec = nodes.colspec(colwidth=colwidth)
            if stub_columns:
                colspec.attributes['stub'] = 1
                stub_columns -= 1
            tgroup += colspec
        if headrows:
            thead = nodes.thead()
            tgroup += thead
            for row in headrows:
                thead += self.build_table_row(row, tableline)
        tbody = nodes.tbody()
        tgroup += tbody
        for row in bodyrows:
            tbody += self.build_table_row(row, tableline)
        return table
    def build_table_row(self, rowdata, tableline):
        """Build one ``row`` node from parsed row data.

        Each cell is (morerows, morecols, offset, cellblock); ``None``
        cells are placeholders for spanned positions and are skipped.
        Non-empty cell contents are parsed as nested body elements.
        """
        row = nodes.row()
        for cell in rowdata:
            if cell is None:
                continue
            morerows, morecols, offset, cellblock = cell
            attributes = {}
            if morerows:
                attributes['morerows'] = morerows
            if morecols:
                attributes['morecols'] = morecols
            entry = nodes.entry(**attributes)
            row += entry
            if ''.join(cellblock):
                self.nested_parse(cellblock, input_offset=tableline+offset,
                                  node=entry)
        return row
explicit = Struct()
"""Patterns and constants used for explicit markup recognition."""
explicit.patterns = Struct(
target=re.compile(r"""
(
_ # anonymous target
| # *OR*
(?!_) # no underscore at the beginning
(?P<quote>`?) # optional open quote
(?![ `]) # first char. not space or
# backquote
(?P<name> # reference name
.+?
)
%(non_whitespace_escape_before)s
(?P=quote) # close quote if open quote used
)
(?<!(?<!\x00):) # no unescaped colon at end
%(non_whitespace_escape_before)s
[ ]? # optional space
: # end of reference name
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),
reference=re.compile(r"""
(
(?P<simple>%(simplename)s)_
| # *OR*
` # open backquote
(?![ ]) # not space
(?P<phrase>.+?) # hyperlink phrase
%(non_whitespace_escape_before)s
`_ # close backquote,
# reference mark
)
$ # end of string
""" % vars(Inliner), re.VERBOSE | re.UNICODE),
substitution=re.compile(r"""
(
(?![ ]) # first char. not space
(?P<name>.+?) # substitution text
%(non_whitespace_escape_before)s
\| # close delimiter
)
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),)
    def footnote(self, match):
        """Parse a footnote (``.. [label]``); return ([node], blank_finish).

        Label forms: manually numbered (``[1]``), auto-numbered (``[#]``,
        optionally with a name: ``[#name]``), or auto-symbol (``[*]``).
        Registers the footnote with the document accordingly.
        """
        src, srcline = self.state_machine.get_source_and_line()
        indented, indent, offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end())
        label = match.group(1)
        name = normalize_name(label)
        footnote = nodes.footnote('\n'.join(indented))
        footnote.source = src
        footnote.line = srcline
        if name[0] == '#':              # auto-numbered
            name = name[1:]             # autonumber label
            footnote['auto'] = 1
            if name:
                footnote['names'].append(name)
            self.document.note_autofootnote(footnote)
        elif name == '*':               # auto-symbol
            name = ''
            footnote['auto'] = '*'
            self.document.note_symbol_footnote(footnote)
        else:                           # manually numbered
            footnote += nodes.label('', label)
            footnote['names'].append(name)
            self.document.note_footnote(footnote)
        if name:
            self.document.note_explicit_target(footnote, footnote)
        else:
            # Unnamed footnotes still need an id.
            self.document.set_id(footnote, footnote)
        if indented:
            self.nested_parse(indented, input_offset=offset, node=footnote)
        return [footnote], blank_finish
    def citation(self, match):
        """Parse a citation (``.. [label]``); return ([node], blank_finish)."""
        src, srcline = self.state_machine.get_source_and_line()
        indented, indent, offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end())
        label = match.group(1)
        name = normalize_name(label)
        citation = nodes.citation('\n'.join(indented))
        citation.source = src
        citation.line = srcline
        citation += nodes.label('', label)
        citation['names'].append(name)
        self.document.note_citation(citation)
        self.document.note_explicit_target(citation, citation)
        if indented:
            self.nested_parse(indented, input_offset=offset, node=citation)
        return [citation], blank_finish
    def hyperlink_target(self, match):
        """Parse a hyperlink target (``.. _name: ...``).

        The target name may span lines, so lines are accumulated (with
        backslash-escapes nulled out) until the target pattern matches.
        :Exception: `MarkupError` if no match is ever found.
        """
        pattern = self.explicit.patterns.target
        lineno = self.state_machine.abs_line_number()
        src, srcline = self.state_machine.get_source_and_line()
        block, indent, offset, blank_finish = \
              self.state_machine.get_first_known_indented(
              match.end(), until_blank=1, strip_indent=0)
        blocktext = match.string[:match.end()] + '\n'.join(block)
        block = [escape2null(line) for line in block]
        escaped = block[0]
        blockindex = 0
        while 1:
            targetmatch = pattern.match(escaped)
            if targetmatch:
                break
            blockindex += 1
            try:
                escaped += block[blockindex]
            except IndexError:
                raise MarkupError('malformed hyperlink target.')
        del block[:blockindex]
        # Keep only the link-block text after the matched target name.
        block[0] = (block[0] + ' ')[targetmatch.end()-len(escaped)-1:].strip()
        target = self.make_target(block, blocktext, lineno,
                                  targetmatch.group('name'))
        return [target], blank_finish
    def make_target(self, block, block_text, lineno, target_name):
        """Build a ``target`` node (indirect or direct) or return the
        system message produced for a malformed target."""
        target_type, data = self.parse_target(block, block_text, lineno)
        if target_type == 'refname':
            # Indirect target: points at another named target.
            target = nodes.target(block_text, '', refname=normalize_name(data))
            target.indirect_reference_name = data
            self.add_target(target_name, '', target, lineno)
            self.document.note_indirect_target(target)
            return target
        elif target_type == 'refuri':
            target = nodes.target(block_text, '')
            self.add_target(target_name, data, target, lineno)
            return target
        else:
            # 'malformed': data is a system_message node.
            return data
    def parse_target(self, block, block_text, lineno):
        """
        Determine the type of reference of a target.

        A block whose last line ends with '_' may be an indirect target
        (a reference to another target); otherwise the block is a URI
        (whitespace removed, escapes undone).

        :Return: A 2-tuple, one of:

            - 'refname' and the indirect reference name
            - 'refuri' and the URI
            - 'malformed' and a system_message node
        """
        if block and block[-1].strip()[-1:] == '_': # possible indirect target
            reference = ' '.join([line.strip() for line in block])
            refname = self.is_reference(reference)
            if refname:
                return 'refname', refname
        reference = ''.join([''.join(line.split()) for line in block])
        return 'refuri', unescape(reference)
def is_reference(self, reference):
match = self.explicit.patterns.reference.match(
whitespace_normalize_name(reference))
if not match:
return None
return unescape(match.group('simple') or match.group('phrase'))
    def add_target(self, targetname, refuri, target, lineno):
        """Fill in and register a ``target`` node.

        A non-empty *targetname* makes an explicit (named) target; an
        empty one makes an anonymous target.  *refuri* (if any) is
        adjusted via the inliner before being stored.
        :Exception: `ApplicationError` if the adjusted URI is empty.
        """
        target.line = lineno
        if targetname:
            name = normalize_name(unescape(targetname))
            target['names'].append(name)
            if refuri:
                uri = self.inliner.adjust_uri(refuri)
                if uri:
                    target['refuri'] = uri
                else:
                    raise ApplicationError('problem with URI: %r' % refuri)
            self.document.note_explicit_target(target, self.parent)
        else:                       # anonymous target
            if refuri:
                target['refuri'] = refuri
            target['anonymous'] = 1
            self.document.note_anonymous_target(target)
    def substitution_def(self, match):
        """Parse a substitution definition (``.. |name| directive:: ...``).

        Accumulates lines until the substitution pattern matches, strips
        the marker, then parses the remainder with a nested state
        machine (initial state 'SubstitutionDef').  Non-inline children
        are moved out to the parent; illegal or empty definitions yield
        a system message instead.
        :Exception: `MarkupError` if no substitution marker is found.
        """
        pattern = self.explicit.patterns.substitution
        src, srcline = self.state_machine.get_source_and_line()
        block, indent, offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end(),
                                                          strip_indent=0)
        blocktext = (match.string[:match.end()] + '\n'.join(block))
        block.disconnect()
        escaped = escape2null(block[0].rstrip())
        blockindex = 0
        while 1:
            subdefmatch = pattern.match(escaped)
            if subdefmatch:
                break
            blockindex += 1
            try:
                escaped = escaped + ' ' + escape2null(block[blockindex].strip())
            except IndexError:
                raise MarkupError('malformed substitution definition.')
        del block[:blockindex] # strip out the substitution marker
        # Keep only the definition text after the matched marker.
        block[0] = (block[0].strip() + ' ')[subdefmatch.end()-len(escaped)-1:-1]
        if not block[0]:
            del block[0]
            offset += 1
        while block and not block[-1].strip():
            block.pop()
        subname = subdefmatch.group('name')
        substitution_node = nodes.substitution_definition(blocktext)
        substitution_node.source = src
        substitution_node.line = srcline
        if not block:
            msg = self.reporter.warning(
                'Substitution definition "%s" missing contents.' % subname,
                nodes.literal_block(blocktext, blocktext),
                source=src, line=srcline)
            return [msg], blank_finish
        block[0] = block[0].strip()
        substitution_node['names'].append(
            nodes.whitespace_normalize_name(subname))
        new_abs_offset, blank_finish = self.nested_list_parse(
              block, input_offset=offset, node=substitution_node,
              initial_state='SubstitutionDef', blank_finish=blank_finish)
        i = 0
        # Only inline/Text children belong to a substitution definition;
        # anything else (e.g. system messages) moves up to the parent.
        for node in substitution_node[:]:
            if not (isinstance(node, nodes.Inline) or
                    isinstance(node, nodes.Text)):
                self.parent += substitution_node[i]
                del substitution_node[i]
            else:
                i += 1
        for node in substitution_node.traverse(nodes.Element):
            if self.disallowed_inside_substitution_definitions(node):
                pformat = nodes.literal_block('', node.pformat().rstrip())
                msg = self.reporter.error(
                    'Substitution definition contains illegal element:',
                    pformat, nodes.literal_block(blocktext, blocktext),
                    source=src, line=srcline)
                return [msg], blank_finish
        if len(substitution_node) == 0:
            msg = self.reporter.warning(
                'Substitution definition "%s" empty or invalid.' % subname,
                nodes.literal_block(blocktext, blocktext),
                source=src, line=srcline)
            return [msg], blank_finish
        self.document.note_substitution_def(
            substitution_node, subname, self.parent)
        return [substitution_node], blank_finish
def disallowed_inside_substitution_definitions(self, node):
if (node['ids'] or
isinstance(node, nodes.reference) and node.get('anonymous') or
isinstance(node, nodes.footnote_reference) and node.get('auto')):
return 1
else:
return 0
    def directive(self, match, **option_presets):
        """Returns a 2-tuple: list of nodes, and a "blank finish" boolean.

        Looks up the directive class by name (and current language); an
        unknown name is rendered as a literal block with an error.
        """
        type_name = match.group(1)
        directive_class, messages = directives.directive(
            type_name, self.memo.language, self.document)
        self.parent += messages
        if directive_class:
            return self.run_directive(
                directive_class, match, type_name, option_presets)
        else:
            return self.unknown_directive(type_name)
    def run_directive(self, directive, match, type_name, option_presets):
        """
        Parse a directive then run its directive function.

        Parameters:

        - `directive`: The class implementing the directive.  Must be
          a subclass of `rst.Directive`.

        - `match`: A regular expression match object which matched the first
          line of the directive.

        - `type_name`: The directive name, as used in the source text.

        - `option_presets`: A dictionary of preset options, defaults for the
          directive options.  Currently, only an "alt" option is passed by
          substitution definitions (value: the substitution name), which may
          be used by an embedded image directive.

        Returns a 2-tuple: list of nodes, and a "blank finish" boolean.
        """
        # Legacy support: plain functions are wrapped in a Directive class.
        if isinstance(directive, (FunctionType, MethodType)):
            from docutils.parsers.rst import convert_directive_function
            directive = convert_directive_function(directive)
        lineno = self.state_machine.abs_line_number()
        src, srcline = self.state_machine.get_source_and_line()
        initial_line_offset = self.state_machine.line_offset
        indented, indent, line_offset, blank_finish \
                  = self.state_machine.get_first_known_indented(match.end(),
                                                                strip_top=0)
        block_text = '\n'.join(self.state_machine.input_lines[
            initial_line_offset : self.state_machine.line_offset + 1])
        try:
            arguments, options, content, content_offset = (
                self.parse_directive_block(indented, line_offset,
                                           directive, option_presets))
        except MarkupError, detail:
            error = self.reporter.error(
                'Error in "%s" directive:\n%s.' % (type_name,
                                                   ' '.join(detail.args)),
                nodes.literal_block(block_text, block_text),
                source=src, line=srcline)
            return [error], blank_finish
        directive_instance = directive(
            type_name, arguments, options, content, lineno,
            content_offset, block_text, self, self.state_machine)
        try:
            result = directive_instance.run()
        except docutils.parsers.rst.DirectiveError, error:
            # DirectiveError carries its own severity level.
            msg_node = self.reporter.system_message(error.level, error.msg,
                                                    source=src, line=srcline)
            msg_node += nodes.literal_block(block_text, block_text)
            result = [msg_node]
        assert isinstance(result, list), \
               'Directive "%s" must return a list of nodes.' % type_name
        for i in range(len(result)):
            assert isinstance(result[i], nodes.Node), \
                   ('Directive "%s" returned non-Node object (index %s): %r'
                    % (type_name, i, result[i]))
        return (result,
                blank_finish or self.state_machine.is_next_line_blank())
    def parse_directive_block(self, indented, line_offset, directive,
                              option_presets):
        """Split a directive's indented block into arguments, options,
        and content.

        The argument/option part is separated from the content by the
        first blank line.  Returns (arguments, options, content,
        content_offset).
        :Exception: `MarkupError` for option errors or disallowed content.
        """
        option_spec = directive.option_spec
        has_content = directive.has_content
        if indented and not indented[0].strip():
            indented.trim_start()
            line_offset += 1
        while indented and not indented[-1].strip():
            indented.trim_end()
        if indented and (directive.required_arguments
                         or directive.optional_arguments
                         or option_spec):
            # Arguments/options precede the first blank line.
            for i, line in enumerate(indented):
                if not line.strip():
                    break
            else:
                i += 1
            arg_block = indented[:i]
            content = indented[i+1:]
            content_offset = line_offset + i + 1
        else:
            content = indented
            content_offset = line_offset
            arg_block = []
        if option_spec:
            options, arg_block = self.parse_directive_options(
                option_presets, option_spec, arg_block)
        else:
            options = {}
        if arg_block and not (directive.required_arguments
                              or directive.optional_arguments):
            # No arguments allowed: the "argument block" is really content.
            content = arg_block + indented[i:]
            content_offset = line_offset
            arg_block = []
        while content and not content[0].strip():
            content.trim_start()
            content_offset += 1
        if directive.required_arguments or directive.optional_arguments:
            arguments = self.parse_directive_arguments(
                directive, arg_block)
        else:
            arguments = []
        if content and not has_content:
            raise MarkupError('no content permitted')
        return (arguments, options, content, content_offset)
    def parse_directive_options(self, option_presets, option_spec, arg_block):
        """Split options off *arg_block*; return (options dict, remaining
        argument lines).

        Option lines start at the first line beginning with ':'.  Preset
        options are used as defaults and may be overridden.
        :Exception: `MarkupError` if option parsing fails.
        """
        options = option_presets.copy()
        for i in range(len(arg_block)):
            if arg_block[i][:1] == ':':
                opt_block = arg_block[i:]
                arg_block = arg_block[:i]
                break
        else:
            opt_block = []
        if opt_block:
            success, data = self.parse_extension_options(option_spec,
                                                         opt_block)
            if success:                 # data is a dict of options
                options.update(data)
            else:                       # data is an error string
                raise MarkupError(data)
        return options, arg_block
def parse_directive_arguments(self, directive, arg_block):
required = directive.required_arguments
optional = directive.optional_arguments
arg_text = '\n'.join(arg_block)
arguments = arg_text.split()
if len(arguments) < required:
raise MarkupError('%s argument(s) required, %s supplied'
% (required, len(arguments)))
elif len(arguments) > required + optional:
if directive.final_argument_whitespace:
arguments = arg_text.split(None, required + optional - 1)
else:
raise MarkupError(
'maximum %s argument(s) allowed, %s supplied'
% (required + optional, len(arguments)))
return arguments
    def parse_extension_options(self, option_spec, datalines):
        """
        Parse `datalines` for a field list containing extension options
        matching `option_spec`.

        :Parameters:
            - `option_spec`: a mapping of option name to conversion
              function, which should raise an exception on bad input.
            - `datalines`: a list of input strings.

        :Return:
            - Success value, 1 or 0.
            - An option dictionary on success, an error string on failure.
        """
        node = nodes.field_list()
        # Parse the option lines as a field list, then convert it.
        newline_offset, blank_finish = self.nested_list_parse(
              datalines, 0, node, initial_state='ExtensionOptions',
              blank_finish=1)
        if newline_offset != len(datalines): # incomplete parse of block
            return 0, 'invalid option block'
        try:
            options = utils.extract_extension_options(node, option_spec)
        except KeyError, detail:
            return 0, ('unknown option: "%s"' % detail.args[0])
        except (ValueError, TypeError), detail:
            return 0, ('invalid option value: %s' % ' '.join(detail.args))
        except utils.ExtensionOptionError, detail:
            return 0, ('invalid option data: %s' % ' '.join(detail.args))
        if blank_finish:
            return 1, options
        else:
            return 0, 'option data incompletely parsed'
    def unknown_directive(self, type_name):
        """Consume the directive's block and return an "Unknown directive"
        error wrapping its raw text."""
        src, srcline = self.state_machine.get_source_and_line()
        indented, indent, offset, blank_finish = \
              self.state_machine.get_first_known_indented(0, strip_indent=0)
        text = '\n'.join(indented)
        error = self.reporter.error(
              'Unknown directive type "%s".' % type_name,
              nodes.literal_block(text, text), source=src, line=srcline)
        return [error], blank_finish
    def comment(self, match):
        """Parse a comment (``..``); return ([comment node], blank_finish).

        A bare ``..`` followed by a blank line produces an empty comment
        node; otherwise the indented block becomes the comment text
        (trailing blank lines trimmed).
        """
        if not match.string[match.end():].strip() \
              and self.state_machine.is_next_line_blank(): # an empty comment?
            return [nodes.comment()], 1 # "A tiny but practical wart."
        indented, indent, offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end())
        while indented and not indented[-1].strip():
            indented.trim_end()
        text = '\n'.join(indented)
        return [nodes.comment(text, text)], blank_finish
explicit.constructs = [
(footnote,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[
( # footnote label:
[0-9]+ # manually numbered footnote
| # *OR*
\# # anonymous auto-numbered footnote
| # *OR*
\#%s # auto-number ed?) footnote label
| # *OR*
\* # auto-symbol footnote
)
\]
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(citation,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[(%s)\] # citation label
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(hyperlink_target,
re.compile(r"""
\.\.[ ]+ # explicit markup start
_ # target indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(substitution_def,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\| # substitution indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(directive,
re.compile(r"""
\.\.[ ]+ # explicit markup start
(%s) # directive name
[ ]? # optional space
:: # directive delimiter
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE))]
    def explicit_markup(self, match, context, next_state):
        """Footnotes, hyperlink targets, directives, comments."""
        nodelist, blank_finish = self.explicit_construct(match)
        self.parent += nodelist
        # Consume any immediately following explicit-markup constructs.
        self.explicit_list(blank_finish)
        return [], next_state, []
    def explicit_construct(self, match):
        """Determine which explicit construct this is, parse & return it."""
        errors = []
        for method, pattern in self.explicit.constructs:
            expmatch = pattern.match(match.string)
            if expmatch:
                try:
                    # Unbound method: the instance is passed explicitly.
                    return method(self, expmatch)
                except MarkupError, error: # never reached?
                    message = ' '.join(error.args)
                    src, srcline = self.state_machine.get_source_and_line()
                    errors.append(self.reporter.warning(
                        message, source=src, line=srcline))
                    break
        # No construct matched (or it failed): fall back to a comment.
        nodelist, blank_finish = self.comment(match)
        return nodelist + errors, blank_finish
    def explicit_list(self, blank_finish):
        """
        Create a nested state machine for a series of explicit markup
        constructs (including anonymous hyperlink targets).
        """
        offset = self.state_machine.line_offset + 1   # next line
        newline_offset, blank_finish = self.nested_list_parse(
              self.state_machine.input_lines[offset:],
              input_offset=self.state_machine.abs_line_offset() + 1,
              node=self.parent, initial_state='Explicit',
              blank_finish=blank_finish,
              match_titles=self.state_machine.match_titles)
        self.goto_line(newline_offset)
        if not blank_finish:
            self.parent += self.unindent_warning('Explicit markup')
    def anonymous(self, match, context, next_state):
        """Anonymous hyperlink targets."""
        nodelist, blank_finish = self.anonymous_target(match)
        self.parent += nodelist
        # Consume any immediately following explicit-markup constructs.
        self.explicit_list(blank_finish)
        return [], next_state, []
    def anonymous_target(self, match):
        """Parse an anonymous target (``__ ...``); return ([node],
        blank_finish).  The empty name marks the target anonymous."""
        lineno = self.state_machine.abs_line_number()
        block, indent, offset, blank_finish \
              = self.state_machine.get_first_known_indented(match.end(),
                                                            until_blank=1)
        blocktext = match.string[:match.end()] + '\n'.join(block)
        block = [escape2null(line) for line in block]
        target = self.make_target(block, blocktext, lineno, '')
        return [target], blank_finish
    def line(self, match, context, next_state):
        """Section title overline or transition marker.

        When titles are allowed, defer to the 'Line' state; otherwise the
        marker is re-dispatched as text ('::' or short markers) or
        reported as an unexpected section title/transition.
        """
        if self.state_machine.match_titles:
            return [match.string], 'Line', []
        elif match.string.strip() == '::':
            raise statemachine.TransitionCorrection('text')
        elif len(match.string.strip()) < 4:
            msg = self.reporter.info(
                'Unexpected possible title overline or transition.\n'
                "Treating it as ordinary text because it's so short.",
                line=self.state_machine.abs_line_number())
            self.parent += msg
            raise statemachine.TransitionCorrection('text')
        else:
            blocktext = self.state_machine.line
            msg = self.reporter.severe(
                  'Unexpected section title or transition.',
                  nodes.literal_block(blocktext, blocktext),
                  line=self.state_machine.abs_line_number())
            self.parent += msg
            return [], next_state, []
    def text(self, match, context, next_state):
        """Titles, definition lists, paragraphs."""
        # Pass the line along as context; the 'Text' state decides which.
        return [match.string], 'Text', []
class RFC2822Body(Body):

    """
    RFC2822 headers are only valid as the first constructs in documents.  As
    soon as anything else appears, the `Body` state should take over.
    """

    patterns = Body.patterns.copy() # can't modify the original
    patterns['rfc2822'] = r'[!-9;-~]+:( +|$)'
    initial_transitions = [(name, 'Body')
                           for name in Body.initial_transitions]
    initial_transitions.insert(-1, ('rfc2822', 'Body')) # just before 'text'

    def rfc2822(self, match, context, next_state):
        """RFC2822-style field list item.

        Like `Body.field_marker`, but builds a field list with class
        'rfc2822' and uses the 'RFC2822List' nested state.
        """
        fieldlist = nodes.field_list(classes=['rfc2822'])
        self.parent += fieldlist
        field, blank_finish = self.rfc2822_field(match)
        fieldlist += field
        offset = self.state_machine.line_offset + 1   # next line
        newline_offset, blank_finish = self.nested_list_parse(
              self.state_machine.input_lines[offset:],
              input_offset=self.state_machine.abs_line_offset() + 1,
              node=fieldlist, initial_state='RFC2822List',
              blank_finish=blank_finish)
        self.goto_line(newline_offset)
        if not blank_finish:
            self.parent += self.unindent_warning(
                  'RFC2822-style field list')
        return [], next_state, []

    def rfc2822_field(self, match):
        """Parse one RFC2822 field; return (field node, blank_finish).

        The field name is the text before the ':' (no inline markup
        processing, unlike `Body.field`).
        """
        name = match.string[:match.string.find(':')]
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end(),
                                                          until_blank=1)
        fieldnode = nodes.field()
        fieldnode += nodes.field_name(name, name)
        fieldbody = nodes.field_body('\n'.join(indented))
        fieldnode += fieldbody
        if indented:
            self.nested_parse(indented, input_offset=line_offset,
                              node=fieldbody)
        return fieldnode, blank_finish
class SpecializedBody(Body):

    """
    Superclass for second and subsequent compound element members.  Compound
    elements are lists and list-like constructs.

    All transition methods are disabled (redefined as `invalid_input`).
    Override individual methods in subclasses to re-enable.

    For example, once an initial bullet list item, say, is recognized, the
    `BulletList` subclass takes over, with a "bullet_list" node as its
    container.  Upon encountering the initial bullet list item, `Body.bullet`
    calls its ``self.nested_list_parse`` (`RSTState.nested_list_parse`), which
    starts up a nested parsing session with `BulletList` as the initial state.
    Only the ``bullet`` transition method is enabled in `BulletList`; as long
    as only bullet list items are encountered, they are parsed and inserted
    into the container.  The first construct which is *not* a bullet list item
    triggers the `invalid_input` method, which ends the nested parse and
    closes the container.  `BulletList` needs to recognize input that is
    invalid in the context of a bullet list, which means everything *other
    than* bullet list items, so it inherits the transition list created in
    `Body`.
    """

    def invalid_input(self, match=None, context=None, next_state=None):
        """Not a compound element member. Abort this state machine."""
        self.state_machine.previous_line() # back up so parent SM can reassess
        raise EOFError

    # Disable every Body transition; subclasses selectively re-enable.
    indent = invalid_input
    bullet = invalid_input
    enumerator = invalid_input
    field_marker = invalid_input
    option_marker = invalid_input
    doctest = invalid_input
    line_block = invalid_input
    grid_table_top = invalid_input
    simple_table_top = invalid_input
    explicit_markup = invalid_input
    anonymous = invalid_input
    line = invalid_input
    text = invalid_input
class BulletList(SpecializedBody):
    """Second and subsequent bullet_list list_items."""
    def bullet(self, match, context, next_state):
        """Bullet list item."""
        expected_bullet = self.parent['bullet']
        if expected_bullet != match.string[0]:
            # A different bullet character means a brand-new list:
            # abort this nested state machine (raises EOFError).
            self.invalid_input()
        item, ended_with_blank = self.list_item(match.end())
        self.parent += item
        self.blank_finish = ended_with_blank
        return [], next_state, []
class DefinitionList(SpecializedBody):
    """Second and subsequent definition_list_items."""
    def text(self, match, context, next_state):
        """Definition lists."""
        # A flush-left text line here is a potential new term; hand it to
        # the `Definition` state, which classifies the following line.
        return [match.string], 'Definition', []
class EnumeratedList(SpecializedBody):
    """Second and subsequent enumerated_list list_items."""
    def enumerator(self, match, context, next_state):
        """Enumerated list item."""
        format, sequence, text, ordinal = self.parse_enumerator(
            match, self.parent['enumtype'])
        # Abort (start a new list) unless this item continues the current
        # enumeration: same format, same sequence type, consecutive ordinal
        # (unless auto-enumerated with '#'), and a valid list item.
        if ( format != self.format
             or (sequence != '#' and (sequence != self.parent['enumtype']
                                      or self.auto
                                      or ordinal != (self.lastordinal + 1)))
             or not self.is_enumerated_list_item(ordinal, sequence, format)):
            # different enumeration: new list
            self.invalid_input()
        if sequence == '#':
            # '#' switches the whole list to automatic enumeration.
            self.auto = 1
        listitem, blank_finish = self.list_item(match.end())
        self.parent += listitem
        self.blank_finish = blank_finish
        self.lastordinal = ordinal
        return [], next_state, []
class FieldList(SpecializedBody):
    """Second and subsequent field_list fields."""
    def field_marker(self, match, context, next_state):
        """Field list field."""
        # Parse one more field and append it to the enclosing field_list.
        node, ended_with_blank = self.field(match)
        self.parent += node
        self.blank_finish = ended_with_blank
        return [], next_state, []
class OptionList(SpecializedBody):
    """Second and subsequent option_list option_list_items."""
    def option_marker(self, match, context, next_state):
        """Option list item."""
        try:
            option_list_item, blank_finish = self.option_list_item(match)
        except MarkupError:
            # Not a valid option line after all; `invalid_input` raises
            # EOFError, so the unbound names below are never reached.
            self.invalid_input()
        self.parent += option_list_item
        self.blank_finish = blank_finish
        return [], next_state, []
class RFC2822List(SpecializedBody, RFC2822Body):
    """Second and subsequent RFC2822-style field_list fields."""
    patterns = RFC2822Body.patterns
    initial_transitions = RFC2822Body.initial_transitions
    def rfc2822(self, match, context, next_state):
        """RFC2822-style field list item."""
        field, blank_finish = self.rfc2822_field(match)
        self.parent += field
        self.blank_finish = blank_finish
        # Stay in this state for further RFC2822 fields.
        return [], 'RFC2822List', []
    # A blank line ends the RFC2822 field list.
    blank = SpecializedBody.invalid_input
class ExtensionOptions(FieldList):
    """
    Parse field_list fields for extension options.
    No nested parsing is done (including inline markup parsing).
    """
    def parse_field_body(self, indented, offset, node):
        """Override `Body.parse_field_body` for simpler parsing."""
        # Split the block into runs of non-blank lines; each run becomes one
        # plain paragraph.  The sentinel '' flushes any trailing run.
        buffered = []
        for current in list(indented) + ['']:
            if not current.strip():
                if buffered:
                    joined = '\n'.join(buffered)
                    node += nodes.paragraph(joined, joined)
                    buffered = []
            else:
                buffered.append(current)
class LineBlock(SpecializedBody):
    """Second and subsequent lines of a line_block."""
    # A blank line ends the line block.
    blank = SpecializedBody.invalid_input
    def line_block(self, match, context, next_state):
        """New line of line block."""
        lineno = self.state_machine.abs_line_number()
        line, messages, blank_finish = self.line_block_line(match, lineno)
        self.parent += line
        # System messages go to the line_block's parent, not the block itself.
        self.parent.parent += messages
        self.blank_finish = blank_finish
        return [], next_state, []
class Explicit(SpecializedBody):
    """Second and subsequent explicit markup construct."""
    def explicit_markup(self, match, context, next_state):
        """Footnotes, hyperlink targets, directives, comments."""
        nodelist, blank_finish = self.explicit_construct(match)
        self.parent += nodelist
        self.blank_finish = blank_finish
        return [], next_state, []
    def anonymous(self, match, context, next_state):
        """Anonymous hyperlink targets."""
        nodelist, blank_finish = self.anonymous_target(match)
        self.parent += nodelist
        self.blank_finish = blank_finish
        return [], next_state, []
    # A blank line ends the run of explicit markup constructs.
    blank = SpecializedBody.invalid_input
class SubstitutionDef(Body):
    """
    Parser for the contents of a substitution_definition element.

    Exactly one construct is consumed: either a directive embedded in the
    substitution definition, or (failing that) plain text.  Both transitions
    raise EOFError to end this nested parse after one construct.
    """
    patterns = {
          'embedded_directive': re.compile(r'(%s)::( +|$)'
                                           % Inliner.simplename, re.UNICODE),
          'text': r''}
    initial_transitions = ['embedded_directive', 'text']
    def embedded_directive(self, match, context, next_state):
        # Parse the directive, using the substitution name as the alt text.
        nodelist, blank_finish = self.directive(match,
                                                alt=self.parent['names'][0])
        self.parent += nodelist
        if not self.state_machine.at_eof():
            self.blank_finish = blank_finish
        raise EOFError
    def text(self, match, context, next_state):
        if not self.state_machine.at_eof():
            self.blank_finish = self.state_machine.is_next_line_blank()
        raise EOFError
class Text(RSTState):
    """
    Classifier of second line of a text block.
    Could be a paragraph, a definition list item, or a title.
    """
    patterns = {'underline': Body.patterns['line'],
                'text': r''}
    initial_transitions = [('underline', 'Body'), ('text', 'Body')]
    def blank(self, match, context, next_state):
        """End of paragraph."""
        # NOTE: self.paragraph returns [ node, system_message(s) ], literalnext
        paragraph, literalnext = self.paragraph(
            context, self.state_machine.abs_line_number() - 1)
        self.parent += paragraph
        if literalnext:
            # The paragraph ended with '::', so a literal block follows.
            self.parent += self.literal_block()
        return [], 'Body', []
    def eof(self, context):
        # Flush any pending paragraph text at end of input.
        if context:
            self.blank(None, context, None)
        return []
    def indent(self, match, context, next_state):
        """Definition list item."""
        definitionlist = nodes.definition_list()
        definitionlistitem, blank_finish = self.definition_list_item(context)
        definitionlist += definitionlistitem
        self.parent += definitionlist
        offset = self.state_machine.line_offset + 1 # next line
        # Continue parsing further definition items in a nested session.
        newline_offset, blank_finish = self.nested_list_parse(
              self.state_machine.input_lines[offset:],
              input_offset=self.state_machine.abs_line_offset() + 1,
              node=definitionlist, initial_state='DefinitionList',
              blank_finish=blank_finish, blank_finish_state='Definition')
        self.goto_line(newline_offset)
        if not blank_finish:
            self.parent += self.unindent_warning('Definition list')
        return [], 'Body', []
    def underline(self, match, context, next_state):
        """Section title."""
        lineno = self.state_machine.abs_line_number()
        src, srcline = self.state_machine.get_source_and_line()
        title = context[0].rstrip()
        underline = match.string.rstrip()
        source = title + '\n' + underline
        messages = []
        if column_width(title) > len(underline):
            # Underline shorter than the title text.
            if len(underline) < 4:
                # Very short "underlines" are assumed to be ordinary text.
                if self.state_machine.match_titles:
                    msg = self.reporter.info(
                        'Possible title underline, too short for the title.\n'
                        "Treating it as ordinary text because it's so short.",
                        source=src, line=srcline)
                    self.parent += msg
                raise statemachine.TransitionCorrection('text')
            else:
                blocktext = context[0] + '\n' + self.state_machine.line
                msg = self.reporter.warning(
                    'Title underline too short.',
                    nodes.literal_block(blocktext, blocktext),
                    source=src, line=srcline)
                messages.append(msg)
        if not self.state_machine.match_titles:
            # Titles are not allowed in this (nested) context.
            blocktext = context[0] + '\n' + self.state_machine.line
            msg = self.reporter.severe(
                'Unexpected section title.',
                nodes.literal_block(blocktext, blocktext),
                source=src, line=srcline)
            self.parent += messages
            self.parent += msg
            return [], next_state, []
        style = underline[0]
        context[:] = []
        self.section(title, source, style, lineno - 1, messages)
        return [], next_state, []
    def text(self, match, context, next_state):
        """Paragraph."""
        startline = self.state_machine.abs_line_number() - 1
        msg = None
        try:
            block = self.state_machine.get_text_block(flush_left=1)
        # Python 2 except syntax; this file predates Python 3 support.
        except statemachine.UnexpectedIndentationError, instance:
            block, src, srcline = instance.args
            msg = self.reporter.error('Unexpected indentation.',
                                      source=src, line=srcline)
        lines = context + list(block)
        paragraph, literalnext = self.paragraph(lines, startline)
        self.parent += paragraph
        self.parent += msg
        if literalnext:
            try:
                self.state_machine.next_line()
            except EOFError:
                pass
            self.parent += self.literal_block()
        return [], next_state, []
    def literal_block(self):
        """Return a list of nodes."""
        indented, indent, offset, blank_finish = \
              self.state_machine.get_indented()
        # Strip trailing blank lines from the block.
        while indented and not indented[-1].strip():
            indented.trim_end()
        if not indented:
            # No indented block: try a quoted (unindented) literal block.
            return self.quoted_literal_block()
        data = '\n'.join(indented)
        literal_block = nodes.literal_block(data, data)
        literal_block.line = offset + 1
        nodelist = [literal_block]
        if not blank_finish:
            nodelist.append(self.unindent_warning('Literal block'))
        return nodelist
    def quoted_literal_block(self):
        abs_line_offset = self.state_machine.abs_line_offset()
        offset = self.state_machine.line_offset
        parent_node = nodes.Element()
        # Run a special-purpose nested parse with QuotedLiteralBlock as the
        # only state; results accumulate in the throwaway parent_node.
        new_abs_offset = self.nested_parse(
            self.state_machine.input_lines[offset:],
            input_offset=abs_line_offset, node=parent_node, match_titles=0,
            state_machine_kwargs={'state_classes': (QuotedLiteralBlock,),
                                  'initial_state': 'QuotedLiteralBlock'})
        self.goto_line(new_abs_offset)
        return parent_node.children
    def definition_list_item(self, termline):
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_indented()
        definitionlistitem = nodes.definition_list_item(
            '\n'.join(termline + list(indented)))
        lineno = self.state_machine.abs_line_number() - 1
        src, srcline = self.state_machine.get_source_and_line()
        definitionlistitem.source = src
        definitionlistitem.line = srcline - 1
        termlist, messages = self.term(termline, lineno)
        definitionlistitem += termlist
        definition = nodes.definition('', *messages)
        definitionlistitem += definition
        if termline[0][-2:] == '::':
            # A term ending in '::' was probably meant to start a literal
            # block; warn the author.
            definition += self.reporter.info(
                'Blank line missing before literal block (after the "::")? '
                'Interpreted as a definition list item.',
                source=src, line=srcline)
        self.nested_parse(indented, input_offset=line_offset, node=definition)
        return definitionlistitem, blank_finish
    # ' : ' (space-colon-space) separates a term from its classifiers.
    classifier_delimiter = re.compile(' +: +')
    def term(self, lines, lineno):
        """Return a definition_list's term and optional classifiers."""
        assert len(lines) == 1
        text_nodes, messages = self.inline_text(lines[0], lineno)
        term_node = nodes.term()
        node_list = [term_node]
        for i in range(len(text_nodes)):
            node = text_nodes[i]
            if isinstance(node, nodes.Text):
                # Only plain Text nodes may contain classifier delimiters;
                # inline-markup nodes pass through unchanged.
                parts = self.classifier_delimiter.split(node.rawsource)
                if len(parts) == 1:
                    node_list[-1] += node
                else:
                    node_list[-1] += nodes.Text(parts[0].rstrip())
                    for part in parts[1:]:
                        classifier_node = nodes.classifier('', part)
                        node_list.append(classifier_node)
            else:
                node_list[-1] += node
        return node_list, messages
class SpecializedText(Text):
    """
    Superclass for second and subsequent lines of Text-variants.
    All transition methods are disabled. Override individual methods in
    subclasses to re-enable.
    """
    def eof(self, context):
        """Incomplete construct."""
        return []
    def invalid_input(self, match=None, context=None, next_state=None):
        """Not a compound element member. Abort this state machine."""
        raise EOFError
    # Disable every Text transition; subclasses re-enable selectively.
    blank = invalid_input
    indent = invalid_input
    underline = invalid_input
    text = invalid_input
class Definition(SpecializedText):
    """Second line of potential definition_list_item."""
    def eof(self, context):
        """Not a definition."""
        # Rewind two lines so the parent state machine can re-read the
        # would-be term line as ordinary text.
        self.state_machine.previous_line(2)
        return []
    def indent(self, match, context, next_state):
        """Definition list item."""
        item, ended_with_blank = self.definition_list_item(context)
        self.parent += item
        self.blank_finish = ended_with_blank
        return [], 'DefinitionList', []
class Line(SpecializedText):
    """
    Second line of over- & underlined section title or transition marker.
    """
    eofcheck = 1 # @@@ ???
    """Set to 0 while parsing sections, so that we don't catch the EOF."""
    def eof(self, context):
        """Transition marker at end of section or document."""
        marker = context[0].strip()
        if self.memo.section_bubble_up_kludge:
            self.memo.section_bubble_up_kludge = 0
        elif len(marker) < 4:
            # Too short to be a transition: re-classify as text.
            self.state_correction(context)
        if self.eofcheck: # ignore EOFError with sections
            lineno = self.state_machine.abs_line_number() - 1
            transition = nodes.transition(rawsource=context[0])
            transition.line = lineno
            self.parent += transition
        self.eofcheck = 1
        return []
    def blank(self, match, context, next_state):
        """Transition marker."""
        src, srcline = self.state_machine.get_source_and_line()
        marker = context[0].strip()
        if len(marker) < 4:
            self.state_correction(context)
        transition = nodes.transition(rawsource=marker)
        transition.source = src
        transition.line = srcline - 1
        self.parent += transition
        return [], 'Body', []
    def text(self, match, context, next_state):
        """Potential over- & underlined title."""
        lineno = self.state_machine.abs_line_number() - 1
        src, srcline = self.state_machine.get_source_and_line()
        overline = context[0]
        title = match.string
        underline = ''
        try:
            underline = self.state_machine.next_line()
        except EOFError:
            # Overline + title with no underline: incomplete.
            blocktext = overline + '\n' + title
            if len(overline.rstrip()) < 4:
                self.short_overline(context, blocktext, lineno, 2)
            else:
                msg = self.reporter.severe(
                    'Incomplete section title.',
                    nodes.literal_block(blocktext, blocktext),
                    source=src, line=srcline-1)
                self.parent += msg
                return [], 'Body', []
        source = '%s\n%s\n%s' % (overline, title, underline)
        overline = overline.rstrip()
        underline = underline.rstrip()
        if not self.transitions['underline'][0].match(underline):
            # Third line is not a valid underline.
            blocktext = overline + '\n' + title + '\n' + underline
            if len(overline.rstrip()) < 4:
                self.short_overline(context, blocktext, lineno, 2)
            else:
                msg = self.reporter.severe(
                    'Missing matching underline for section title overline.',
                    nodes.literal_block(source, source),
                    source=src, line=srcline-1)
                self.parent += msg
                return [], 'Body', []
        elif overline != underline:
            # Overline and underline must be identical.
            blocktext = overline + '\n' + title + '\n' + underline
            if len(overline.rstrip()) < 4:
                self.short_overline(context, blocktext, lineno, 2)
            else:
                msg = self.reporter.severe(
                    'Title overline & underline mismatch.',
                    nodes.literal_block(source, source),
                    source=src, line=srcline-1)
                self.parent += msg
                return [], 'Body', []
        title = title.rstrip()
        messages = []
        if column_width(title) > len(overline):
            # Title wider than its overline: warn (or treat as text if the
            # overline is very short).
            blocktext = overline + '\n' + title + '\n' + underline
            if len(overline.rstrip()) < 4:
                self.short_overline(context, blocktext, lineno, 2)
            else:
                msg = self.reporter.warning(
                    'Title overline too short.',
                    nodes.literal_block(source, source),
                    source=src, line=srcline-1)
                messages.append(msg)
        style = (overline[0], underline[0])
        self.eofcheck = 0 # @@@ not sure this is correct
        self.section(title.lstrip(), source, style, lineno + 1, messages)
        self.eofcheck = 1
        return [], 'Body', []
    indent = text # indented title
    def underline(self, match, context, next_state):
        # Two line-like lines in a row with no title between them.
        overline = context[0]
        blocktext = overline + '\n' + self.state_machine.line
        lineno = self.state_machine.abs_line_number() - 1
        src, srcline = self.state_machine.get_source_and_line()
        if len(overline.rstrip()) < 4:
            self.short_overline(context, blocktext, lineno, 1)
        msg = self.reporter.error(
            'Invalid section title or transition marker.',
            nodes.literal_block(blocktext, blocktext),
            source=src, line=srcline-1)
        self.parent += msg
        return [], 'Body', []
    def short_overline(self, context, blocktext, lineno, lines=1):
        """Report a short overline and re-parse the lines as ordinary text."""
        src, srcline = self.state_machine.get_source_and_line(lineno)
        msg = self.reporter.info(
            'Possible incomplete section title.\nTreating the overline as '
            "ordinary text because it's so short.",
            source=src, line=srcline)
        self.parent += msg
        self.state_correction(context, lines)
    def state_correction(self, context, lines=1):
        # Back up and restart in the Body state's 'text' transition;
        # raising StateCorrection aborts the current transition.
        self.state_machine.previous_line(lines)
        context[:] = []
        raise statemachine.StateCorrection('Body', 'text')
class QuotedLiteralBlock(RSTState):
    """
    Nested parse handler for quoted (unindented) literal blocks.
    Special-purpose. Not for inclusion in `state_classes`.
    """
    patterns = {'initial_quoted': r'(%(nonalphanum7bit)s)' % Body.pats,
                'text': r''}
    initial_transitions = ('initial_quoted', 'text')
    def __init__(self, state_machine, debug=0):
        RSTState.__init__(self, state_machine, debug)
        # Collected system messages, appended to the parent at EOF.
        self.messages = []
        # Absolute line number of the first quoted line (set lazily).
        self.initial_lineno = None
    def blank(self, match, context, next_state):
        # A blank line ends a non-empty quoted block.
        if context:
            raise EOFError
        else:
            return context, next_state, []
    def eof(self, context):
        if context:
            # Emit the accumulated quoted lines as one literal_block.
            src, srcline = self.state_machine.get_source_and_line(
                self.initial_lineno)
            text = '\n'.join(context)
            literal_block = nodes.literal_block(text, text)
            literal_block.source = src
            literal_block.line = srcline
            self.parent += literal_block
        else:
            self.parent += self.reporter.warning(
                'Literal block expected; none found.',
                line=self.state_machine.abs_line_number())
            # src not available, because statemachine.input_lines is empty
            self.state_machine.previous_line()
        self.parent += self.messages
        return []
    def indent(self, match, context, next_state):
        assert context, ('QuotedLiteralBlock.indent: context should not '
                         'be empty!')
        self.messages.append(
            self.reporter.error('Unexpected indentation.',
                                line=self.state_machine.abs_line_number()))
        self.state_machine.previous_line()
        raise EOFError
    def initial_quoted(self, match, context, next_state):
        """Match arbitrary quote character on the first line only."""
        self.remove_transition('initial_quoted')
        quote = match.string[0]
        pattern = re.compile(re.escape(quote))
        # New transition matches consistent quotes only:
        self.add_transition('quoted',
                            (pattern, self.quoted, self.__class__.__name__))
        self.initial_lineno = self.state_machine.abs_line_number()
        return [match.string], next_state, []
    def quoted(self, match, context, next_state):
        """Match consistent quotes on subsequent lines."""
        context.append(match.string)
        return context, next_state, []
    def text(self, match, context, next_state):
        # A line starting with a different character: inconsistent quoting.
        if context:
            src, srcline = self.state_machine.get_source_and_line()
            self.messages.append(
                self.reporter.error('Inconsistent literal block quoting.',
                                    source=src, line=srcline))
            self.state_machine.previous_line()
        raise EOFError
# Exported at module level; consumed by `RSTStateMachine` as its state set.
state_classes = (Body, BulletList, DefinitionList, EnumeratedList, FieldList,
                 OptionList, LineBlock, ExtensionOptions, Explicit, Text,
                 Definition, Line, SubstitutionDef, RFC2822Body, RFC2822List)
"""Standard set of State classes used to start `RSTStateMachine`."""
| bsd-3-clause |
th3sys/transfixed | use_case/push_items.py | 1 | 1901 | from __future__ import print_function # Python 2/3 compatibility
import boto3
import json
import uuid
import time
import decimal
# Local-development endpoint, kept for reference:
#dynamodb = boto3.resource('dynamodb', region_name='us-east-1', endpoint_url="http://localhost:8000")
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
orders_table = dynamodb.Table('Orders')
sec_table = dynamodb.Table('Securities')
# Load the securities fixture and insert each entry into the Securities
# table.  parse_float=Decimal is needed because DynamoDB rejects Python
# floats; only Decimal is accepted for numeric attributes.
with open("securities.json") as json_file:
    securities = json.load(json_file, parse_float = decimal.Decimal)
    for security in securities:
        Symbol = security['Symbol']
        ProductType = security['ProductType']
        SubscriptionEnabled = bool(security['SubscriptionEnabled'])
        TradingEnabled = bool(security['TradingEnabled'])
        Description = security['Description']
        Risk = security['Risk']
        print("Adding security:", Symbol)
        sec_table.put_item(
           Item={
               'Symbol': Symbol,
               'ProductType': ProductType,
               'SubscriptionEnabled': SubscriptionEnabled,
               'TradingEnabled':TradingEnabled,
               'Description':Description,
               'Risk':Risk
            }
        )
# Load the orders fixture.  The id and timestamp are generated fresh on
# every run (the JSON values are intentionally ignored), so re-running the
# script inserts new order rows rather than overwriting previous ones.
with open("orders.json") as json_file:
    orders = json.load(json_file, parse_float = decimal.Decimal)
    for order in orders:
        NewOrderId = str(uuid.uuid4().hex) #int(order['NewOrderId'])
        TransactionTime = str(time.time()) #order['TransactionTime']
        ClientOrderId = int(order['ClientOrderId'])
        Status = order['Status']
        Details = order['Details']
        print("Adding order:", NewOrderId, TransactionTime)
        orders_table.put_item(
           Item={
               'NewOrderId': NewOrderId,
               'TransactionTime': TransactionTime,
               'ClientOrderId': ClientOrderId,
               'Status':Status,
               'Details':Details
            }
        )
| mit |
Buckmarble/Elite_Lunar_kernel | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    """Open a per-CPU perf event stream and print task events as they arrive."""
    # System-wide maps: one entry per online CPU / per thread.
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    # Dummy software event recording task and comm events; wake up on every
    # event (wakeup_events=1, watermark=1) so the loop below prints promptly.
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, watermark = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    evsel.open(cpus = cpus, threads = threads);
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    # Block until at least one ring buffer has data, then drain each CPU.
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            # Python 2 print statement; this tool predates Python 3.
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event

if __name__ == '__main__':
    main()
| gpl-2.0 |
oxc/Flexget | flexget/plugins/output/memusage.py | 3 | 1789 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
from flexget import options, plugin
from flexget.event import event
from flexget.terminal import console
try:
from guppy import hpy
except ImportError:
# this will leave the plugin unloaded
raise plugin.DependencyError(issued_by='memusage', missing='ext lib `guppy`', silent=True)
log = logging.getLogger('mem_usage')
"""
http://blog.mfabrik.com/2008/03/07/debugging-django-memory-leak-with-trackrefs-and-guppy/
# Print memory statistics
def update():
print heapy.heap()
# Print relative memory consumption since last sycle
def update():
print heapy.heap()
heapy.setref()
# Print relative memory consumption w/heap traversing
def update()
print heapy.heap().get_rp(40)
heapy.setref()
"""
heapy = None
@event('manager.startup')
def on_manager_startup(manager):
    """Create the global guppy heap inspector when --mem-usage is given."""
    if not manager.options.mem_usage:
        return
    global heapy
    # Instantiating hpy() starts heap tracking for the whole run.
    heapy = hpy()
@event('manager.shutdown')
def on_manager_shutdown(manager):
    """Print memory usage reports at shutdown when --mem-usage is given."""
    if not manager.options.mem_usage:
        return
    # Imported lazily: only needed (and only paid for) in this code path.
    import resource
    console('Resource Module memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
    global heapy
    console('Heapy module calculating memory usage:')
    console(heapy.heap())
    console('-' * 79)
    console('Heapy module calculating report (this may take a while):')
    # get_rp traverses the heap to build a reference pattern report.
    console(heapy.heap().get_rp(40))
    heapy = None
@event('options.register')
def register_parser_arguments():
    """Register the --mem-usage command-line flag with FlexGet's parser."""
    options.get_parser().add_argument('--mem-usage', action='store_true', dest='mem_usage', default=False,
                                      help='display memory usage debug information')
| mit |
AndreaEdwards/pdbtools | __init__.py | 3 | 1069 | __all__ = ["nmr_combine-peaks.py",
"pdb_addH.py",
"pdb_atom_renumber.py",
"pdb_bfactor.py",
"pdb_centermass.py",
"pdb_centerasu.py",
"pdb_clean.py",
"pdb_combiner.py",
"pdb_coulomb.py",
"pdb_dist-filter.py",
"pdb_disulfide.py",
"pdb_download.py",
"pdb_dssp.py",
"pdb_exper.py",
"pdb_ligand.py",
"pdb_moment.py",
"pdb_mutator.py",
"pdb_neighbors.py",
"pdb_offset.py",
"pdb_oligomer.py",
"pdb_param.py",
"pdb_pdb2dir.py",
"pdb_residue_renumber.py",
"pdb_sasa.py",
"pdb_satk.py",
"pdb_seq.py",
"pdb_splitnmr.py",
"pdb_strip.py",
"pdb_subset.py",
"pdb_torsion.py",
"pdb_water-contact.py"]
# Copyright 2007, Michael J. Harms
# This program is distributed under General Public License v. 3. See the file
# COPYING for a copy of the license.
| gpl-3.0 |
exceptionhandle/ImageProcessor.activity | Imaging/PIL/ImageMath.py | 16 | 6903 | #
# The Python Imaging Library
# $Id: ImageMath.py 2508 2005-09-12 19:01:03Z fredrik $
#
# a simple math add-on for the Python Imaging Library
#
# History:
# 1999-02-15 fl Original PIL Plus release
# 2005-05-05 fl Simplified and cleaned up for PIL 1.1.6
# 2005-09-12 fl Fixed int() and float() for Python 2.4.1
#
# Copyright (c) 1999-2005 by Secret Labs AB
# Copyright (c) 2005 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image
import _imagingmath
VERBOSE = 0
def _isconstant(v):
return isinstance(v, type(0)) or isinstance(v, type(0.0))
class _Operand:
    # wraps an image operand, providing standard operators
    def __init__(self, im):
        self.im = im
    def __fixup(self, im1):
        # convert image to suitable mode
        # Images are promoted to "I" (int) or left as "I"/"F"; constants
        # become uniform images matching self's size.
        if isinstance(im1, _Operand):
            # argument was an image.
            if im1.im.mode in ("1", "L"):
                return im1.im.convert("I")
            elif im1.im.mode in ("I", "F"):
                return im1.im
            else:
                # Python 2 raise syntax; module predates Python 3.
                raise ValueError, "unsupported mode: %s" % im1.im.mode
        else:
            # argument was a constant
            if _isconstant(im1) and self.im.mode in ("1", "L", "I"):
                return Image.new("I", self.im.size, im1)
            else:
                return Image.new("F", self.im.size, im1)
    def apply(self, op, im1, im2=None, mode=None):
        # Apply unary (im2 is None) or binary C-level operator `op`,
        # looked up in _imagingmath as "<op>_<mode>".
        im1 = self.__fixup(im1)
        if im2 is None:
            # unary operation
            out = Image.new(mode or im1.mode, im1.size, None)
            im1.load()
            try:
                op = getattr(_imagingmath, op+"_"+im1.mode)
            except AttributeError:
                raise TypeError, "bad operand type for '%s'" % op
            _imagingmath.unop(op, out.im.id, im1.im.id)
        else:
            # binary operation
            im2 = self.__fixup(im2)
            if im1.mode != im2.mode:
                # convert both arguments to floating point
                if im1.mode != "F": im1 = im1.convert("F")
                if im2.mode != "F": im2 = im2.convert("F")
                if im1.mode != im2.mode:
                    raise ValueError, "mode mismatch"
            if im1.size != im2.size:
                # crop both arguments to a common size
                size = (min(im1.size[0], im2.size[0]),
                        min(im1.size[1], im2.size[1]))
                if im1.size != size: im1 = im1.crop((0, 0) + size)
                if im2.size != size: im2 = im2.crop((0, 0) + size)
                out = Image.new(mode or im1.mode, size, None)
            else:
                out = Image.new(mode or im1.mode, im1.size, None)
            im1.load(); im2.load()
            try:
                op = getattr(_imagingmath, op+"_"+im1.mode)
            except AttributeError:
                raise TypeError, "bad operand type for '%s'" % op
            _imagingmath.binop(op, out.im.id, im1.im.id, im2.im.id)
        return _Operand(out)
    # unary operators
    def __nonzero__(self):
        # an image is "true" if it contains at least one non-zero pixel
        return self.im.getbbox() is not None
    def __abs__(self):
        return self.apply("abs", self)
    def __pos__(self):
        return self
    def __neg__(self):
        return self.apply("neg", self)
    # binary operators (and their reflected variants)
    def __add__(self, other):
        return self.apply("add", self, other)
    def __radd__(self, other):
        return self.apply("add", other, self)
    def __sub__(self, other):
        return self.apply("sub", self, other)
    def __rsub__(self, other):
        return self.apply("sub", other, self)
    def __mul__(self, other):
        return self.apply("mul", self, other)
    def __rmul__(self, other):
        return self.apply("mul", other, self)
    def __div__(self, other):
        return self.apply("div", self, other)
    def __rdiv__(self, other):
        return self.apply("div", other, self)
    def __mod__(self, other):
        return self.apply("mod", self, other)
    def __rmod__(self, other):
        return self.apply("mod", other, self)
    def __pow__(self, other):
        return self.apply("pow", self, other)
    def __rpow__(self, other):
        return self.apply("pow", other, self)
    # bitwise
    def __invert__(self):
        return self.apply("invert", self)
    def __and__(self, other):
        return self.apply("and", self, other)
    def __rand__(self, other):
        return self.apply("and", other, self)
    def __or__(self, other):
        return self.apply("or", self, other)
    def __ror__(self, other):
        return self.apply("or", other, self)
    def __xor__(self, other):
        return self.apply("xor", self, other)
    def __rxor__(self, other):
        return self.apply("xor", other, self)
    def __lshift__(self, other):
        return self.apply("lshift", self, other)
    def __rshift__(self, other):
        return self.apply("rshift", self, other)
    # logical (comparisons return mask images, not booleans)
    def __eq__(self, other):
        return self.apply("eq", self, other)
    def __ne__(self, other):
        return self.apply("ne", self, other)
    def __lt__(self, other):
        return self.apply("lt", self, other)
    def __le__(self, other):
        return self.apply("le", self, other)
    def __gt__(self, other):
        return self.apply("gt", self, other)
    def __ge__(self, other):
        return self.apply("ge", self, other)
    # conversions
    def imagemath_int(self):
        return _Operand(self.im.convert("I"))
    def imagemath_float(self):
        return _Operand(self.im.convert("F"))
    # logical
    def imagemath_equal(self, other):
        return self.apply("eq", self, other, mode="I")
    def imagemath_notequal(self, other):
        return self.apply("ne", self, other, mode="I")
    def imagemath_min(self, other):
        return self.apply("min", self, other)
    def imagemath_max(self, other):
        return self.apply("max", self, other)
    def imagemath_convert(self, mode):
        return _Operand(self.im.convert(mode))
# Build the expression-evaluation namespace: every name beginning with
# "imagemath_" is registered under its suffix (e.g. "int", "min", "convert").
ops = {}
for k, v in globals().items():
    if k[:10] == "imagemath_":
        ops[k[10:]] = v
##
# Evaluates an image expression.
#
# @param expression A string containing a Python-style expression.
# @keyparam options Values to add to the evaluation context.  You
#     can either use a dictionary, or one or more keyword arguments.
# @return The evaluated expression. This is usually an image object,
#     but can also be an integer, a floating point value, or a pixel
#     tuple, depending on the expression.

def eval(expression, _dict={}, **kw):
    # build execution namespace
    # NOTE(security): the expression is executed with the builtin eval();
    # never pass untrusted input to this function.
    args = ops.copy()
    args.update(_dict)
    args.update(kw)
    # Wrap every image-like value so it picks up _Operand's operators.
    for k, v in args.items():
        if hasattr(v, "im"):
            args[k] = _Operand(v)
    import __builtin__
    out =__builtin__.eval(expression, args)
    try:
        # Unwrap an _Operand result back to a plain image.
        return out.im
    except AttributeError:
        return out
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.