repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
vinhlh/bite-project | server/handlers/test_cycles.py | 17 | 1174 | # Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test cycle management."""
__author__ = 'alexis.torres@gmail.com (Alexis O. Torres)'
import logging
import webapp2
from common.handlers import base
from models import test_cycle
class TestCyclesHandler(base.BaseHandler):
    """Serves the list of known test cycles as a JSON payload."""

    def get(self):
        """Fetch every test cycle and write it out JSON-encoded."""
        all_cycles = test_cycle.FetchTestCycles()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(test_cycle.JsonEncode(all_cycles))
# WSGI application wiring: both the canonical '/cycles' URL and the
# explicit '/cycles/all' alias are served by the same handler.
app = webapp2.WSGIApplication(
    [('/cycles', TestCyclesHandler),
     ('/cycles/all', TestCyclesHandler)],
    debug=True)
| apache-2.0 |
akatsoulas/remo | remo/base/views.py | 3 | 10033 | import feedparser
import json
import logging
import requests
from django import http
from django.db.models import Q
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.cache import cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect, render
from django.views import generic
from django.views.decorators.cache import cache_control, never_cache
from django_statsd.clients import statsd
from mozilla_django_oidc.views import OIDCAuthenticationCallbackView
from raven.contrib.django.models import client
import forms
import utils
from remo.base.decorators import PermissionMixin, permission_check
from remo.base.forms import EmailMentorForm
from remo.featuredrep.models import FeaturedRep
from remo.profiles.forms import UserStatusForm
from remo.profiles.models import UserProfile, UserStatus
class OIDCCallbackView(OIDCAuthenticationCallbackView):
    """OIDC callback that surfaces a friendly warning on failed logins."""

    def login_failure(self, msg=''):
        """Queue a warning message, then delegate to the parent handler.

        When no message is supplied, a default explanation of the
        accepted-Rep / vouched-Mozillian requirement is shown instead.
        """
        default_msg = ('Login failed. Please make sure that you are '
                       'an accepted Rep or a vouched Mozillian '
                       'and you use your Mozilla Login Identity to login. '
                       'This should be the same address as used when you got invited.')
        messages.warning(self.request, msg or default_msg)
        return super(OIDCCallbackView, self).login_failure()
@cache_control(private=True, no_cache=True)
def main(request):
    """Main page of the website."""
    rep = utils.latest_object_or_none(FeaturedRep)

    # The planet feed is fetched remotely, so it is cached; an empty
    # string (fetch failure) is deliberately not cached so the next
    # request retries.
    feed_text = cache.get('planet')
    if not feed_text:
        try:
            feed_text = requests.get(settings.PLANET_URL,
                                     timeout=settings.PLANET_MAX_TIMEOUT).text
        except (requests.exceptions.Timeout,
                requests.exceptions.ConnectionError):
            feed_text = ''
        else:
            # Keep the raw feed around for eight hours.
            cache.set('planet', feed_text, 60 * 60 * 8)

    entries = feedparser.parse(feed_text).entries[:3]
    context = {'featuredrep': rep, 'planet_entries': entries}
    return render(request, 'main.jinja', context)
def custom_404(request):
    """Custom 404 error handler."""
    context = {'featuredrep': utils.latest_object_or_none(FeaturedRep)}
    body = render(request, '404.jinja', context)
    return http.HttpResponseNotFound(body)
def custom_500(request):
    """Custom 500 error handler."""
    body = render(request, '500.jinja')
    return http.HttpResponseServerError(body)
def robots_txt(request):
    """Generate a robots.txt.

    Do not allow bots to crawl report pages (bug 923754).
    """
    lines = ['User-agent: *\n']
    if not settings.ENGAGE_ROBOTS:
        # Staging / non-production: keep crawlers out entirely.
        lines.append('Disallow: /\n')
    else:
        lines.append('Disallow: /reports/\n')
        reps = User.objects.filter(groups__name='Rep',
                                   userprofile__registration_complete=True)
        # One Disallow entry per registered Rep's report pages.
        for rep in reps:
            lines.append('Disallow: /u/{display_name}/r/\n'
                         .format(display_name=rep.userprofile.display_name))
    return http.HttpResponse(''.join(lines), content_type='text/plain')
@never_cache
@permission_check()
def edit_settings(request):
    """Edit user settings."""
    user = request.user
    # Mozillians do not have editable Rep settings.
    if user.groups.filter(name='Mozillians').exists():
        raise Http404
    form = forms.EditSettingsForm(request.POST or None,
                                  instance=user.userprofile)
    if request.method == 'POST' and form.is_valid():
        form.save()
        # Record which settings changed, for metrics.
        for changed_field in form.changed_data:
            statsd.incr('base.edit_setting_%s' % changed_field)
        messages.success(request, 'Settings successfully edited.')
        return redirect('dashboard')
    context = {'user': user, 'settingsform': form}
    return render(request, 'settings.jinja', context)
@never_cache
@permission_check(permissions=['profiles.add_userstatus',
                               'profiles.change_userstatus'],
                  filter_field='display_name', owner_field='user',
                  model=UserProfile)
def edit_availability(request, display_name):
    """Edit availability settings.

    Lets a Rep mark an unavailability period. On first submission
    (no prior unavailable status exists) the Rep's mentor is also
    emailed about the absence.
    """
    user = request.user
    args = {}
    # True when no existing "unavailable" status was found and a fresh
    # one is being created this request.
    created = False
    # Mozillians and Alumni have no availability to edit.
    if user.groups.filter(Q(name='Mozillians') | Q(name='Alumni')).exists():
        raise Http404()
    try:
        # Most recent unavailability record for this user, if any.
        status = (UserStatus.objects.filter(user=user)
                  .filter(is_unavailable=True).latest('created_on'))
    except UserStatus.DoesNotExist:
        status = UserStatus(user=user)
        created = True
    initial_data = {'is_replaced': False}
    if not created:
        # Pre-fill the form with the existing expected return date.
        initial_data['expected_date'] = (status.expected_date
                                         .strftime('%d %B %Y'))
    status_form = UserStatusForm(request.POST or None,
                                 instance=status,
                                 initial=initial_data)
    email_mentor_form = EmailMentorForm(request.POST or None)
    if status_form.is_valid():
        status_form.save()
        # Only notify the mentor the first time a status is created,
        # and only when the email form validated too.
        if created and email_mentor_form.is_valid():
            expected_date = (status_form.cleaned_data['expected_date']
                             .strftime('%d %B %Y'))
            start_date = (status_form.cleaned_data['start_date']
                          .strftime('%d %B %Y'))
            msg = email_mentor_form.cleaned_data['body']
            mentee = request.user.get_full_name()
            template = 'emails/mentor_unavailability_notification.jinja'
            subject = ('[Mentee {0}] Mentee will be unavailable starting '
                       'on {1} until {2}'.format(mentee,
                                                 start_date,
                                                 expected_date))
            email_mentor_form.send_email(request, subject, msg, template,
                                         {'user_status': status})
        messages.success(request, 'Request submitted successfully.')
        return redirect('dashboard')
    args['status_form'] = status_form
    args['email_form'] = email_mentor_form
    args['created'] = created
    return render(request, 'edit_availability.jinja', args)
class BaseListView(PermissionMixin, generic.ListView):
    """Base content list view."""
    template_name = 'base_content_list.jinja'
    create_object_url = None

    def get_context_data(self, **kwargs):
        """Expose the model's verbose names and the creation URL."""
        ctx = super(BaseListView, self).get_context_data(**kwargs)
        meta = self.model._meta
        ctx['verbose_name'] = meta.verbose_name
        ctx['verbose_name_plural'] = meta.verbose_name_plural
        ctx['create_object_url'] = self.create_object_url
        return ctx
class BaseCreateView(PermissionMixin, generic.CreateView):
    """Base content create view."""
    template_name = 'base_content_edit.jinja'

    def get_context_data(self, **kwargs):
        """Add the model's verbose name and a 'creating' flag for the template."""
        context = super(BaseCreateView, self).get_context_data(**kwargs)
        context['verbose_name'] = self.model._meta.verbose_name
        context['creating'] = True
        return context

    def form_valid(self, form):
        """Flash a success message once the object has been created.

        Fixes the user-facing typo 'succesfully' -> 'successfully'.
        """
        content = self.model._meta.verbose_name.capitalize()
        messages.success(self.request, '%s successfully created.' % content)
        return super(BaseCreateView, self).form_valid(form)
class BaseUpdateView(PermissionMixin, generic.UpdateView):
    """Base content edit view.

    Objects exposing an ``is_editable`` attribute set to a falsy value
    cannot be updated; both GET and POST are rejected with an error
    message and a redirect back to ``success_url``.
    """
    template_name = 'base_content_edit.jinja'

    def get_context_data(self, **kwargs):
        """Add the model's verbose name and a 'creating' flag for the template."""
        context = super(BaseUpdateView, self).get_context_data(**kwargs)
        context['verbose_name'] = self.model._meta.verbose_name
        context['creating'] = False
        return context

    def form_valid(self, form):
        """Flash a success message once the object has been updated.

        Fixes the user-facing typo 'succesfully' -> 'successfully'.
        """
        content = self.model._meta.verbose_name.capitalize()
        messages.success(self.request, '%s successfully updated.' % content)
        return super(BaseUpdateView, self).form_valid(form)

    def _reject_locked(self):
        # Shared rejection path for non-editable objects (was duplicated
        # in get() and post()).
        messages.error(self.request, 'Object cannot be updated.')
        return redirect(self.success_url)

    def get(self, request, *args, **kwargs):
        if getattr(self.get_object(), 'is_editable', True):
            return super(BaseUpdateView, self).get(request, *args, **kwargs)
        return self._reject_locked()

    def post(self, request, *args, **kwargs):
        if getattr(self.get_object(), 'is_editable', True):
            return super(BaseUpdateView, self).post(request, *args, **kwargs)
        return self._reject_locked()
class BaseDeleteView(PermissionMixin, generic.DeleteView):
    """Base content delete view."""

    def delete(self, request, *args, **kwargs):
        """Delete the object and flash a message; refuse locked objects.

        Objects whose ``is_editable`` attribute is falsy are not deleted;
        an error message is flashed and the user is redirected back.
        Fixes the user-facing typo 'succesfully' -> 'successfully'.
        """
        if getattr(self.get_object(), 'is_editable', True):
            content = self.model._meta.verbose_name.capitalize()
            messages.success(self.request, '%s successfully deleted.' % content)
            return super(BaseDeleteView, self).delete(request, *args, **kwargs)
        messages.error(self.request, 'Object cannot be deleted.')
        return redirect(self.success_url)
@require_POST
@csrf_exempt
def capture_csp_violation(request):
    """Relay browser Content-Security-Policy violation reports to Sentry."""
    data = client.get_data_from_request(request)
    data.update({'level': logging.INFO, 'logger': 'CSP'})
    try:
        csp_data = json.loads(request.body)
    except ValueError:
        # Cannot decode CSP violation data, ignore
        return HttpResponseBadRequest('Invalid CSP Report')
    try:
        blocked_uri = csp_data['csp-report']['blocked-uri']
    except KeyError:
        # Incomplete CSP report
        return HttpResponseBadRequest('Incomplete CSP Report')
    client.captureMessage(message='CSP Violation: {}'.format(blocked_uri),
                          data=data)
    return HttpResponse('Captured CSP violation, thanks for reporting.')
| bsd-3-clause |
TathagataChakraborti/resource-conflicts | PLANROB-2015/seq-sat-lama/Python-2.5.2/Lib/test/test_re.py | 6 | 36239 | import sys
sys.path = ['.'] + sys.path
from test.test_support import verbose, run_unittest
import re
from re import Scanner
import sys, os, traceback
from weakref import proxy
# Misc tests from Tim Peters' re.doc
# WARNING: Don't change details in these tests if you don't know
# what you're doing. Some of these tests were carefuly modeled to
# cover most of the code.
import unittest
class ReTests(unittest.TestCase):
def test_weakref(self):
s = 'QabbbcR'
x = re.compile('ab+c')
y = proxy(x)
self.assertEqual(x.findall('QabbbcR'), y.findall('QabbbcR'))
def test_search_star_plus(self):
self.assertEqual(re.search('x*', 'axx').span(0), (0, 0))
self.assertEqual(re.search('x*', 'axx').span(), (0, 0))
self.assertEqual(re.search('x+', 'axx').span(0), (1, 3))
self.assertEqual(re.search('x+', 'axx').span(), (1, 3))
self.assertEqual(re.search('x', 'aaa'), None)
self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0))
self.assertEqual(re.match('a*', 'xxx').span(), (0, 0))
self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3))
self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3))
self.assertEqual(re.match('a+', 'xxx'), None)
def bump_num(self, matchobj):
int_value = int(matchobj.group(0))
return str(int_value + 1)
def test_basic_re_sub(self):
self.assertEqual(re.sub("(?i)b+", "x", "bbbb BBBB"), 'x x')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'),
'9.3 -3 24x100y')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3),
'9.3 -3 23x99y')
self.assertEqual(re.sub('.', lambda m: r"\n", 'x'), '\\n')
self.assertEqual(re.sub('.', r"\n", 'x'), '\n')
s = r"\1\1"
self.assertEqual(re.sub('(.)', s, 'x'), 'xx')
self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s)
self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s)
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('a',r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D','a'),
'\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), '\t\n\v\r\f\a')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'),
(chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)))
self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest')
def test_bug_449964(self):
# fails for group followed by other escape
self.assertEqual(re.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx'),
'xx\bxx\b')
def test_bug_449000(self):
# Test for sub() on escaped characters
self.assertEqual(re.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
def test_bug_1140(self):
# re.sub(x, y, u'') should return u'', not '', and
# re.sub(x, y, '') should return '', not u''.
# Also:
# re.sub(x, y, unicode(x)) should return unicode(y), and
# re.sub(x, y, str(x)) should return
# str(y) if isinstance(y, str) else unicode(y).
for x in 'x', u'x':
for y in 'y', u'y':
z = re.sub(x, y, u'')
self.assertEqual(z, u'')
self.assertEqual(type(z), unicode)
#
z = re.sub(x, y, '')
self.assertEqual(z, '')
self.assertEqual(type(z), str)
#
z = re.sub(x, y, unicode(x))
self.assertEqual(z, y)
self.assertEqual(type(z), unicode)
#
z = re.sub(x, y, str(x))
self.assertEqual(z, y)
self.assertEqual(type(z), type(y))
def test_sub_template_numeric_escape(self):
# bug 776311 and friends
self.assertEqual(re.sub('x', r'\0', 'x'), '\0')
self.assertEqual(re.sub('x', r'\000', 'x'), '\000')
self.assertEqual(re.sub('x', r'\001', 'x'), '\001')
self.assertEqual(re.sub('x', r'\008', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\009', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\111', 'x'), '\111')
self.assertEqual(re.sub('x', r'\117', 'x'), '\117')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\1111')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\111' + '1')
self.assertEqual(re.sub('x', r'\00', 'x'), '\x00')
self.assertEqual(re.sub('x', r'\07', 'x'), '\x07')
self.assertEqual(re.sub('x', r'\08', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\09', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\0a', 'x'), '\0' + 'a')
self.assertEqual(re.sub('x', r'\400', 'x'), '\0')
self.assertEqual(re.sub('x', r'\777', 'x'), '\377')
self.assertRaises(re.error, re.sub, 'x', r'\1', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\8', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\9', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\11', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\18', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\1a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\90', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\99', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\118', 'x') # r'\11' + '8'
self.assertRaises(re.error, re.sub, 'x', r'\11a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\181', 'x') # r'\18' + '1'
self.assertRaises(re.error, re.sub, 'x', r'\800', 'x') # r'\80' + '0'
# in python2.3 (etc), these loop endlessly in sre_parser.py
self.assertEqual(re.sub('(((((((((((x)))))))))))', r'\11', 'x'), 'x')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'),
'xz8')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'),
'xza')
def test_qualified_re_sub(self):
self.assertEqual(re.sub('a', 'b', 'aaaaa'), 'bbbbb')
self.assertEqual(re.sub('a', 'b', 'aaaaa', 1), 'baaaa')
def test_bug_114660(self):
self.assertEqual(re.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'),
'hello there')
def test_bug_462270(self):
# Test for empty sub() behaviour, see SF bug #462270
self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-')
self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d')
def test_symbolic_refs(self):
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a a>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<1a1>', 'xx')
self.assertRaises(IndexError, re.sub, '(?P<a>x)', '\g<ab>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\\2', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<-1>', 'xx')
def test_re_subn(self):
self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
self.assertEqual(re.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1))
self.assertEqual(re.subn("b+", "x", "xyz"), ('xyz', 0))
self.assertEqual(re.subn("b*", "x", "xyz"), ('xxxyxzx', 4))
self.assertEqual(re.subn("b*", "x", "xyz", 2), ('xxxyz', 2))
def test_re_split(self):
self.assertEqual(re.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c'])
self.assertEqual(re.split(":*", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:*)", ":a:b::c"),
['', ':', 'a', ':', 'b', '::', 'c'])
self.assertEqual(re.split("(?::*)", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:)*", ":a:b::c"),
['', ':', 'a', ':', 'b', ':', 'c'])
self.assertEqual(re.split("([b:]+)", ":a:b::c"),
['', ':', 'a', ':b::', 'c'])
self.assertEqual(re.split("(b)|(:+)", ":a:b::c"),
['', None, ':', 'a', None, ':', '', 'b', None, '',
None, '::', 'c'])
self.assertEqual(re.split("(?:b)|(?::+)", ":a:b::c"),
['', 'a', '', '', 'c'])
def test_qualified_re_split(self):
self.assertEqual(re.split(":", ":a:b::c", 2), ['', 'a', 'b::c'])
self.assertEqual(re.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d'])
self.assertEqual(re.split("(:)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
self.assertEqual(re.split("(:*)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
def test_re_findall(self):
self.assertEqual(re.findall(":+", "abc"), [])
self.assertEqual(re.findall(":+", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:+)", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:)(:*)", "a:b::c:::d"), [(":", ""),
(":", ":"),
(":", "::")])
def test_bug_117612(self):
self.assertEqual(re.findall(r"(a|(b))", "aba"),
[("a", ""),("b", "b"),("a", "")])
def test_re_match(self):
self.assertEqual(re.match('a', 'a').groups(), ())
self.assertEqual(re.match('(a)', 'a').groups(), ('a',))
self.assertEqual(re.match(r'(a)', 'a').group(0), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1, 1), ('a', 'a'))
pat = re.compile('((a)|(b))(c)?')
self.assertEqual(pat.match('a').groups(), ('a', 'a', None, None))
self.assertEqual(pat.match('b').groups(), ('b', None, 'b', None))
self.assertEqual(pat.match('ac').groups(), ('a', 'a', None, 'c'))
self.assertEqual(pat.match('bc').groups(), ('b', None, 'b', 'c'))
self.assertEqual(pat.match('bc').groups(""), ('b', "", 'b', 'c'))
# A single group
m = re.match('(a)', 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(1), 'a')
self.assertEqual(m.group(1, 1), ('a', 'a'))
pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None))
self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'),
(None, 'b', None))
self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c'))
def test_re_groupref_exists(self):
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a)').groups(),
('(', 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None)
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None)
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(),
('a', 'b'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'a').groups(),
('a', ''))
# Tests for bug #1177831: exercise groups other than the first group
p = re.compile('(?P<g1>a)(?P<g2>b)?((?(g2)c|d))')
self.assertEqual(p.match('abc').groups(),
('a', 'b', 'c'))
self.assertEqual(p.match('ad').groups(),
('a', None, 'd'))
self.assertEqual(p.match('abd'), None)
self.assertEqual(p.match('ac'), None)
def test_re_groupref(self):
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a|').groups(),
('|', 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None)
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None)
self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(),
('a', 'a'))
self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(),
(None, None))
def test_groupdict(self):
self.assertEqual(re.match('(?P<first>first) (?P<second>second)',
'first second').groupdict(),
{'first':'first', 'second':'second'})
def test_expand(self):
self.assertEqual(re.match("(?P<first>first) (?P<second>second)",
"first second")
.expand(r"\2 \1 \g<second> \g<first>"),
"second first second first")
def test_repeat_minmax(self):
self.assertEqual(re.match("^(\w){1}$", "abc"), None)
self.assertEqual(re.match("^(\w){1}?$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None)
self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^x{1}$", "xxx"), None)
self.assertEqual(re.match("^x{1}?$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertEqual(re.match("^x{}$", "xxx"), None)
self.assertNotEqual(re.match("^x{}$", "x{}"), None)
def test_getattr(self):
self.assertEqual(re.match("(a)", "a").pos, 0)
self.assertEqual(re.match("(a)", "a").endpos, 1)
self.assertEqual(re.match("(a)", "a").string, "a")
self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1)))
self.assertNotEqual(re.match("(a)", "a").re, None)
def test_special_escapes(self):
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
self.assertEqual(re.search(r"\b(b.)\b",
u"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
u"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"^abc$", u"\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", u"abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", u"\nabc\n", re.M), None)
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a").group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.LOCALE).group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.UNICODE).group(0), "1aa! a")
def test_ignore_case(self):
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", u"ABC", re.I).group(0), "ABC")
def test_bigcharset(self):
self.assertEqual(re.match(u"([\u2222\u2223])",
u"\u2222").group(1), u"\u2222")
self.assertEqual(re.match(u"([\u2222\u2223])",
u"\u2222", re.UNICODE).group(1), u"\u2222")
def test_anyall(self):
self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0),
"a\nb")
self.assertEqual(re.match("a.*b", "a\n\nb", re.DOTALL).group(0),
"a\n\nb")
def test_non_consuming(self):
self.assertEqual(re.match("(a(?=\s[^a]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[^a]*))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]*))", "a bc").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[^a]))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[abc]))", "a d").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s\1)", "a b").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s(abc|a))", "a b").group(1), "a")
def test_ignore_case(self):
self.assertEqual(re.match(r"(a\s[^a])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[^a]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"(a\s[abc])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[abc]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"((a)\s\2)", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s\2*)", "a aa", re.I).group(1), "a aa")
self.assertEqual(re.match(r"((a)\s(abc|a))", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s(abc|a)*)", "a aa", re.I).group(1), "a aa")
def test_category(self):
self.assertEqual(re.match(r"(\s)", " ").group(1), " ")
def test_getlower(self):
import _sre
self.assertEqual(_sre.getlower(ord('A'), 0), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.LOCALE), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a'))
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", u"ABC", re.I).group(0), "ABC")
def test_not_literal(self):
self.assertEqual(re.search("\s([^a])", " b").group(1), "b")
self.assertEqual(re.search("\s([^a]*)", " bb").group(1), "bb")
def test_search_coverage(self):
self.assertEqual(re.search("\s(b)", " b").group(1), "b")
self.assertEqual(re.search("a\s", "a ").group(0), "a ")
def test_re_escape(self):
p=""
for i in range(0, 256):
p = p + chr(i)
self.assertEqual(re.match(re.escape(chr(i)), chr(i)) is not None,
True)
self.assertEqual(re.match(re.escape(chr(i)), chr(i)).span(), (0,1))
pat=re.compile(re.escape(p))
self.assertEqual(pat.match(p) is not None, True)
self.assertEqual(pat.match(p).span(), (0,256))
def test_pickling(self):
import pickle
self.pickle_test(pickle)
import cPickle
self.pickle_test(cPickle)
# old pickles expect the _compile() reconstructor in sre module
import warnings
original_filters = warnings.filters[:]
try:
warnings.filterwarnings("ignore", "The sre module is deprecated",
DeprecationWarning)
from sre import _compile
finally:
warnings.filters = original_filters
def pickle_test(self, pickle):
oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)')
s = pickle.dumps(oldpat)
newpat = pickle.loads(s)
self.assertEqual(oldpat, newpat)
def test_constants(self):
self.assertEqual(re.I, re.IGNORECASE)
self.assertEqual(re.L, re.LOCALE)
self.assertEqual(re.M, re.MULTILINE)
self.assertEqual(re.S, re.DOTALL)
self.assertEqual(re.X, re.VERBOSE)
def test_flags(self):
for flag in [re.I, re.M, re.X, re.S, re.L]:
self.assertNotEqual(re.compile('^pattern$', flag), None)
def test_sre_character_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertNotEqual(re.match(r"\%03o" % i, chr(i)), None)
self.assertNotEqual(re.match(r"\%03o0" % i, chr(i)+"0"), None)
self.assertNotEqual(re.match(r"\%03o8" % i, chr(i)+"8"), None)
self.assertNotEqual(re.match(r"\x%02x" % i, chr(i)), None)
self.assertNotEqual(re.match(r"\x%02x0" % i, chr(i)+"0"), None)
self.assertNotEqual(re.match(r"\x%02xz" % i, chr(i)+"z"), None)
self.assertRaises(re.error, re.match, "\911", "")
def test_sre_character_class_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertNotEqual(re.match(r"[\%03o]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\%03o0]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\%03o8]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02x]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02x0]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02xz]" % i, chr(i)), None)
self.assertRaises(re.error, re.match, "[\911]", "")
def test_bug_113254(self):
self.assertEqual(re.match(r'(a)|(b)', 'b').start(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').end(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').span(1), (-1, -1))
def test_bug_527371(self):
# bug described in patches 527371/672491
self.assertEqual(re.match(r'(a)?a','a').lastindex, None)
self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1)
self.assertEqual(re.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup, 'a')
self.assertEqual(re.match("(?P<a>a(b))", "ab").lastgroup, 'a')
self.assertEqual(re.match("((a))", "a").lastindex, 1)
def test_bug_545855(self):
# bug 545855 -- This pattern failed to cause a compile error as it
# should, instead provoking a TypeError.
self.assertRaises(re.error, re.compile, 'foo[a-')
def test_bug_418626(self):
# bugs 418626 at al. -- Testing Greg Chapman's addition of op code
# SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of
# pattern '*?' on a long string.
self.assertEqual(re.match('.*?c', 10000*'ab'+'cd').end(0), 20001)
self.assertEqual(re.match('.*?cd', 5000*'ab'+'c'+5000*'ab'+'cde').end(0),
20003)
self.assertEqual(re.match('.*?cd', 20000*'abc'+'de').end(0), 60001)
# non-simple '*?' still used to hit the recursion limit, before the
# non-recursive scheme was implemented.
self.assertEqual(re.search('(a|b)*?c', 10000*'ab'+'cd').end(0), 20001)
def test_bug_612074(self):
pat=u"["+re.escape(u"\u2039")+u"]"
self.assertEqual(re.compile(pat) and 1, 1)
def test_stack_overflow(self):
# nasty cases that used to overflow the straightforward recursive
# implementation of repeated groups.
self.assertEqual(re.match('(x)*', 50000*'x').group(1), 'x')
self.assertEqual(re.match('(x)*y', 50000*'x'+'y').group(1), 'x')
self.assertEqual(re.match('(x)*?y', 50000*'x'+'y').group(1), 'x')
def test_scanner(self):
def s_ident(scanner, token): return token
def s_operator(scanner, token): return "op%s" % token
def s_float(scanner, token): return float(token)
def s_int(scanner, token): return int(token)
scanner = Scanner([
(r"[a-zA-Z_]\w*", s_ident),
(r"\d+\.\d*", s_float),
(r"\d+", s_int),
(r"=|\+|-|\*|/", s_operator),
(r"\s+", None),
])
self.assertNotEqual(scanner.scanner.scanner("").pattern, None)
self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"),
(['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5,
'op+', 'bar'], ''))
def test_bug_448951(self):
# bug 448951 (similar to 429357, but with single char match)
# (Also test greedy matches.)
for op in '','?','*':
self.assertEqual(re.match(r'((.%s):)?z'%op, 'z').groups(),
(None, None))
self.assertEqual(re.match(r'((.%s):)?z'%op, 'a:z').groups(),
('a:', 'a'))
def test_bug_725106(self):
# capturing groups in alternatives in repeats
self.assertEqual(re.match('^((a)|b)*', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*', 'abc').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)|b)*?c', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*?d', 'abcd').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*?c', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*?c', 'abc').groups(),
('b', None))
def test_bug_725149(self):
    """Bug #725149: engine must restore mark_stack_base before marks."""
    # mark_stack_base restoring before restoring marks
    self.assertEqual(re.match('(a)(?:(?=(b)*)c)*', 'abb').groups(),
                     ('a', None))
    self.assertEqual(re.match('(a)((?!(b)*))*', 'abb').groups(),
                     ('a', None, None))
def test_bug_764548(self):
    """Bug #764548: re.compile() must accept str/unicode subclasses."""
    # bug 764548, re.compile() barfs on str/unicode subclasses
    try:
        unicode
    except NameError:
        # Interpreter without a unicode type: nothing to test.
        return # no problem if we have no unicode
    class my_unicode(unicode): pass
    pat = re.compile(my_unicode("abc"))
    self.assertEqual(pat.match("xyz"), None)
def test_finditer(self):
    """finditer() yields one match object per non-overlapping match."""
    # Renamed from ``iter`` so the local does not shadow the builtin.
    matches = re.finditer(r":+", "a:b::c:::d")
    self.assertEqual([item.group(0) for item in matches],
                     [":", "::", ":::"])
def test_bug_926075(self):
    """Bug #926075: equal str and unicode patterns must not share one
    compiled-pattern cache entry (objects must be distinct)."""
    try:
        unicode
    except NameError:
        return # no problem if we have no unicode
    self.assert_(re.compile('bug_926075') is not
                 re.compile(eval("u'bug_926075'")))
def test_bug_931848(self):
    """Bug #931848: split() on a character class of non-ASCII dots."""
    try:
        unicode
    except NameError:
        # No unicode support: skip, matching the other unicode guards.
        # (Previously this fell through with ``pass`` and would then
        # crash at the eval'd u"" literal below instead of skipping.)
        return
    pattern = eval('u"[\u002E\u3002\uFF0E\uFF61]"')
    self.assertEqual(re.compile(pattern).split("a.b.c"),
                     ['a','b','c'])
def test_bug_581080(self):
    """Bug #581080: finditer/scanner must report correct spans and
    terminate after the last match."""
    # Renamed from ``iter`` so the local does not shadow the builtin.
    it = re.finditer(r"\s", "a b")
    self.assertEqual(it.next().span(), (1,2))
    self.assertRaises(StopIteration, it.next)
    scanner = re.compile(r"\s").scanner("a b")
    self.assertEqual(scanner.search().span(), (1, 2))
    # Exhausted scanner returns None rather than raising.
    self.assertEqual(scanner.search(), None)
def test_bug_817234(self):
    """Bug #817234: '.*' must yield the final empty match at end of
    string exactly once before finditer stops."""
    # Renamed from ``iter`` so the local does not shadow the builtin.
    it = re.finditer(r".*", "asdf")
    self.assertEqual(it.next().span(), (0, 4))
    self.assertEqual(it.next().span(), (4, 4))
    self.assertRaises(StopIteration, it.next)
def test_empty_array(self):
    """Matching against an empty array.array buffer must not crash."""
    # SF bug 1647541
    import array
    for typecode in 'cbBuhHiIlLfd':
        a = array.array(typecode)
        # Only the empty pattern can match an empty buffer.
        self.assertEqual(re.compile("bla").match(a), None)
        self.assertEqual(re.compile("").match(a).groups(), ())
def test_inline_flags(self):
    """Bug #1700: inline (?i)/(?iu) flags must behave like the
    corresponding re.I / re.U compile() arguments for non-ASCII chars."""
    upper_char = unichr(0x1ea0) # Latin Capital Letter A with Dot Below
    lower_char = unichr(0x1ea1) # Latin Small Letter A with Dot Below
    # Explicit flag arguments.
    p = re.compile(upper_char, re.I | re.U)
    q = p.match(lower_char)
    self.assertNotEqual(q, None)
    p = re.compile(lower_char, re.I | re.U)
    q = p.match(upper_char)
    self.assertNotEqual(q, None)
    # Inline (?i) combined with an explicit re.U.
    p = re.compile('(?i)' + upper_char, re.U)
    q = p.match(lower_char)
    self.assertNotEqual(q, None)
    p = re.compile('(?i)' + lower_char, re.U)
    q = p.match(upper_char)
    self.assertNotEqual(q, None)
    # Fully inline (?iu).
    p = re.compile('(?iu)' + upper_char)
    q = p.match(lower_char)
    self.assertNotEqual(q, None)
    p = re.compile('(?iu)' + lower_char)
    q = p.match(upper_char)
    self.assertNotEqual(q, None)
def run_re_tests():
    """Run the data-driven suite from test.re_tests.

    Each entry is (pattern, string, outcome[, repl, expected]) where
    outcome is SUCCEED, FAIL or SYNTAX_ERROR.  Failures are reported by
    printing '===' / '***' lines rather than raising, so one bad entry
    does not abort the whole run.  Successful matches are additionally
    re-tried with unicode inputs, a range-limited search area, and the
    IGNORECASE / LOCALE / UNICODE flags.
    """
    from test.re_tests import benchmarks, tests, SUCCEED, FAIL, SYNTAX_ERROR
    if verbose:
        print 'Running re_tests test suite'
    else:
        # To save time, only run the first and last 10 tests
        #tests = tests[:10] + tests[-10:]
        pass
    for t in tests:
        sys.stdout.flush()
        pattern = s = outcome = repl = expected = None
        if len(t) == 5:
            pattern, s, outcome, repl, expected = t
        elif len(t) == 3:
            pattern, s, outcome = t
        else:
            raise ValueError, ('Test tuples should have 3 or 5 fields', t)
        try:
            obj = re.compile(pattern)
        except re.error:
            if outcome == SYNTAX_ERROR: pass # Expected a syntax error
            else:
                print '=== Syntax error:', t
        except KeyboardInterrupt: raise KeyboardInterrupt
        except:
            # Any other compile-time failure is unexpected; report and
            # keep going with the next entry.
            print '*** Unexpected error ***', t
            if verbose:
                traceback.print_exc(file=sys.stdout)
        else:
            try:
                result = obj.search(s)
            except re.error, msg:
                print '=== Unexpected exception', t, repr(msg)
            if outcome == SYNTAX_ERROR:
                # This should have been a syntax error; forget it.
                pass
            elif outcome == FAIL:
                if result is None: pass # No match, as expected
                else: print '=== Succeeded incorrectly', t
            elif outcome == SUCCEED:
                if result is not None:
                    # Matched, as expected, so now we compute the
                    # result string and compare it to our expected result.
                    start, end = result.span(0)
                    vardict={'found': result.group(0),
                             'groups': result.group(),
                             'flags': result.re.flags}
                    # Expose numbered groups as g1..g99 for the eval'd
                    # ``repl`` expression below.
                    for i in range(1, 100):
                        try:
                            gi = result.group(i)
                            # Special hack because else the string concat fails:
                            if gi is None:
                                gi = "None"
                        except IndexError:
                            gi = "Error"
                        vardict['g%d' % i] = gi
                    # Named groups are exposed under their own names.
                    for i in result.re.groupindex.keys():
                        try:
                            gi = result.group(i)
                            if gi is None:
                                gi = "None"
                        except IndexError:
                            gi = "Error"
                        vardict[i] = gi
                    repl = eval(repl, vardict)
                    if repl != expected:
                        print '=== grouping error', t,
                        print repr(repl) + ' should be ' + repr(expected)
                else:
                    print '=== Failed incorrectly', t
                # Try the match on a unicode string, and check that it
                # still succeeds.
                try:
                    result = obj.search(unicode(s, "latin-1"))
                    if result is None:
                        print '=== Fails on unicode match', t
                except NameError:
                    continue # 1.5.2
                except TypeError:
                    continue # unicode test case
                # Try the match on a unicode pattern, and check that it
                # still succeeds.
                obj=re.compile(unicode(pattern, "latin-1"))
                result = obj.search(s)
                if result is None:
                    print '=== Fails on unicode pattern match', t
                # Try the match with the search area limited to the extent
                # of the match and see if it still succeeds. \B will
                # break (because it won't match at the end or start of a
                # string), so we'll ignore patterns that feature it.
                if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
                        and result is not None:
                    obj = re.compile(pattern)
                    result = obj.search(s, result.start(0), result.end(0) + 1)
                    if result is None:
                        print '=== Failed on range-limited match', t
                # Try the match with IGNORECASE enabled, and check that it
                # still succeeds.
                obj = re.compile(pattern, re.IGNORECASE)
                result = obj.search(s)
                if result is None:
                    print '=== Fails on case-insensitive match', t
                # Try the match with LOCALE enabled, and check that it
                # still succeeds.
                obj = re.compile(pattern, re.LOCALE)
                result = obj.search(s)
                if result is None:
                    print '=== Fails on locale-sensitive match', t
                # Try the match with UNICODE locale enabled, and check
                # that it still succeeds.
                obj = re.compile(pattern, re.UNICODE)
                result = obj.search(s)
                if result is None:
                    print '=== Fails on unicode-sensitive match', t
def test_main():
    """Entry point: run the unittest suite, then the data-driven suite."""
    run_unittest(ReTests)
    run_re_tests()
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test_main()
| mit |
zhoulingjun/django | tests/admin_changelist/tests.py | 155 | 40267 | from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.admin.views.main import ALL_VAR, SEARCH_VAR, ChangeList
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.utils import formats, six
from .admin import (
BandAdmin, ChildAdmin, ChordsBandAdmin, ConcertAdmin,
CustomPaginationAdmin, CustomPaginator, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin, EmptyValueChildAdmin, FilteredChildAdmin,
GroupAdmin, InvitationAdmin, NoListDisplayLinksParentAdmin, ParentAdmin,
QuartetAdmin, SwallowAdmin, site as custom_site,
)
from .models import (
Band, Child, ChordsBand, ChordsMusician, Concert, CustomIdUser, Event,
Genre, Group, Invitation, Membership, Musician, OrderedObject, Parent,
Quartet, Swallow, SwallowOneToOne, UnorderedObject,
)
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
def setUp(self):
    # A RequestFactory builds bare GET requests without going through
    # the full test client / middleware stack.
    self.factory = RequestFactory()
def _create_superuser(self, username):
    """Create and return a ``User`` flagged as a superuser."""
    superuser = User.objects.create(username=username, is_superuser=True)
    return superuser
def _mocked_authenticated_request(self, url, user):
    """Return a GET request for ``url`` with ``user`` attached, as if
    authentication middleware had already run."""
    mocked_request = self.factory.get(url)
    mocked_request.user = user
    return mocked_request
def test_select_related_preserved(self):
    """
    Regression test for #10348: ChangeList.get_queryset() shouldn't
    overwrite a custom select_related provided by ModelAdmin.get_queryset().
    """
    m = ChildAdmin(Child, custom_site)
    request = self.factory.get('/child/')
    list_select_related = m.get_list_select_related(request)
    cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    # The nested dict reflects the admin's custom select_related
    # ('parent__name' — presumably set in ChildAdmin.get_queryset();
    # see admin.py).
    self.assertEqual(cl.queryset.query.select_related, {
        'parent': {'name': {}}
    })
def test_select_related_as_tuple(self):
    """A tuple-valued list_select_related is passed through to the query."""
    ia = InvitationAdmin(Invitation, custom_site)
    request = self.factory.get('/invitation/')
    list_select_related = ia.get_list_select_related(request)
    cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
                    ia.list_filter, ia.date_hierarchy, ia.search_fields,
                    list_select_related, ia.list_per_page,
                    ia.list_max_show_all, ia.list_editable, ia)
    self.assertEqual(cl.queryset.query.select_related, {'player': {}})
def test_select_related_as_empty_tuple(self):
    """An empty list_select_related tuple disables select_related."""
    ia = InvitationAdmin(Invitation, custom_site)
    ia.list_select_related = ()
    request = self.factory.get('/invitation/')
    list_select_related = ia.get_list_select_related(request)
    cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
                    ia.list_filter, ia.date_hierarchy, ia.search_fields,
                    list_select_related, ia.list_per_page,
                    ia.list_max_show_all, ia.list_editable, ia)
    # The query attribute is False (not an empty dict) when disabled.
    self.assertEqual(cl.queryset.query.select_related, False)
def test_get_select_related_custom_method(self):
    """A ModelAdmin may compute list_select_related dynamically by
    overriding get_list_select_related()."""
    class GetListSelectRelatedAdmin(admin.ModelAdmin):
        list_display = ('band', 'player')

        def get_list_select_related(self, request):
            return ('band', 'player')
    ia = GetListSelectRelatedAdmin(Invitation, custom_site)
    request = self.factory.get('/invitation/')
    list_select_related = ia.get_list_select_related(request)
    cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
                    ia.list_filter, ia.date_hierarchy, ia.search_fields,
                    list_select_related, ia.list_per_page,
                    ia.list_max_show_all, ia.list_editable, ia)
    self.assertEqual(cl.queryset.query.select_related, {'player': {}, 'band': {}})
def test_result_list_empty_changelist_value(self):
    """
    Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
    for relationship fields
    """
    new_child = Child.objects.create(name='name', parent=None)
    request = self.factory.get('/child/')
    m = ChildAdmin(Child, custom_site)
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    cl = ChangeList(request, Child, list_display, list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    cl.formset = None
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl})
    table_output = template.render(context)
    link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
    row_html = (
        '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
        '<td class="field-parent nowrap">-</td></tr></tbody>' % link
    )
    # The NULL parent cell must render as '-', the default empty value.
    self.assertNotEqual(table_output.find(row_html), -1,
                        'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_on_admin_site(self):
    """
    Test that empty value display can be set on AdminSite
    """
    new_child = Child.objects.create(name='name', parent=None)
    request = self.factory.get('/child/')
    # Set a new empty display value on AdminSite.  Restore the original
    # afterwards: admin.site is the process-wide default site, so leaving
    # '???' in place would leak into unrelated tests.
    original_empty_value_display = admin.site.empty_value_display
    admin.site.empty_value_display = '???'
    self.addCleanup(
        setattr, admin.site, 'empty_value_display', original_empty_value_display)
    m = ChildAdmin(Child, admin.site)
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    cl = ChangeList(request, Child, list_display, list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    cl.formset = None
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl})
    table_output = template.render(context)
    link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
    row_html = (
        '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
        '<td class="field-parent nowrap">???</td></tr></tbody>' % link
    )
    # The NULL parent cell must render with the site-level '???' value.
    self.assertNotEqual(table_output.find(row_html), -1,
                        'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_in_model_admin(self):
    """
    Test that empty value display can be set in ModelAdmin or individual fields.
    """
    new_child = Child.objects.create(name='name', parent=None)
    request = self.factory.get('/child/')
    m = EmptyValueChildAdmin(Child, admin.site)
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    cl = ChangeList(request, Child, list_display, list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    cl.formset = None
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl})
    table_output = template.render(context)
    link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
    # '†' comes from the field-level override, '-empty-' from the
    # admin-level one (both defined on EmptyValueChildAdmin in admin.py).
    row_html = (
        '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
        '<td class="field-age_display">&dagger;</td><td class="field-age">-empty-</td></tr></tbody>' % link
    )
    self.assertNotEqual(table_output.find(row_html), -1,
                        'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
    """
    Verifies that inclusion tag result_list generates a table when with
    default ModelAdmin settings.
    """
    new_parent = Parent.objects.create(name='parent')
    new_child = Child.objects.create(name='name', parent=new_parent)
    request = self.factory.get('/child/')
    m = ChildAdmin(Child, custom_site)
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    cl = ChangeList(request, Child, list_display, list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    cl.formset = None
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl})
    table_output = template.render(context)
    link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
    row_html = (
        '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
        '<td class="field-parent nowrap">Parent object</td></tr></tbody>' % link
    )
    self.assertNotEqual(table_output.find(row_html), -1,
                        'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
    """
    Regression tests for #11791: Inclusion tag result_list generates a
    table and this checks that the items are nested within the table
    element tags.
    Also a regression test for #13599, verifies that hidden fields
    when list_editable is enabled are rendered in a div outside the
    table.
    """
    new_parent = Parent.objects.create(name='parent')
    new_child = Child.objects.create(name='name', parent=new_parent)
    request = self.factory.get('/child/')
    m = ChildAdmin(Child, custom_site)
    # Test with list_editable fields
    m.list_display = ['id', 'name', 'parent']
    m.list_display_links = ['id']
    m.list_editable = ['name']
    cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    FormSet = m.get_changelist_formset(request)
    cl.formset = FormSet(queryset=cl.result_list)
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl})
    table_output = template.render(context)
    # make sure that hidden fields are in the correct place
    hiddenfields_div = '<div class="hiddenfields"><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></div>' % new_child.id
    self.assertInHTML(hiddenfields_div, table_output, msg_prefix='Failed to find hidden fields')
    # make sure that list editable fields are rendered in divs correctly
    editable_name_field = '<input name="form-0-name" value="name" class="vTextField" maxlength="30" type="text" id="id_form-0-name" />'
    self.assertInHTML('<td class="field-name">%s</td>' % editable_name_field, table_output, msg_prefix='Failed to find "name" list_editable field')
def test_result_list_editable(self):
    """
    Regression test for #14312: list_editable with pagination
    """
    new_parent = Parent.objects.create(name='parent')
    for i in range(200):
        Child.objects.create(name='name %s' % i, parent=new_parent)
    request = self.factory.get('/child/', data={'p': -1}) # Anything outside range
    m = ChildAdmin(Child, custom_site)
    # Test with list_editable fields
    m.list_display = ['id', 'name', 'parent']
    m.list_display_links = ['id']
    m.list_editable = ['name']
    # An out-of-range page with list_editable enabled must be rejected.
    # (Context-manager form instead of assertRaises + lambda.)
    with self.assertRaises(IncorrectLookupParameters):
        ChangeList(request, Child, m.list_display, m.list_display_links,
                   m.list_filter, m.date_hierarchy, m.search_fields,
                   m.list_select_related, m.list_per_page,
                   m.list_max_show_all, m.list_editable, m)
def test_custom_paginator(self):
    """The ChangeList uses the admin's paginator class (CustomPaginator
    here, via CustomPaginationAdmin)."""
    new_parent = Parent.objects.create(name='parent')
    for i in range(200):
        Child.objects.create(name='name %s' % i, parent=new_parent)
    request = self.factory.get('/child/')
    m = CustomPaginationAdmin(Child, custom_site)
    cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    cl.get_results(request)
    self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Basic ManyToMany.
    """
    blues = Genre.objects.create(name='Blues')
    band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
    # Adding the same genre twice is a no-op on the M2M itself; the
    # duplication risk is in the filtered JOIN below.
    band.genres.add(blues)
    band.genres.add(blues)
    m = BandAdmin(Band, custom_site)
    request = self.factory.get('/band/', data={'genres': blues.pk})
    cl = ChangeList(request, Band, m.list_display,
                    m.list_display_links, m.list_filter, m.date_hierarchy,
                    m.search_fields, m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    cl.get_results(request)
    # There's only one Group instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. With an intermediate model.
    """
    lead = Musician.objects.create(name='Vox')
    band = Group.objects.create(name='The Hype')
    # Two through-rows for the same (group, musician) pair would
    # duplicate the group in an unDISTINCT filtered queryset.
    Membership.objects.create(group=band, music=lead, role='lead voice')
    Membership.objects.create(group=band, music=lead, role='bass player')
    m = GroupAdmin(Group, custom_site)
    request = self.factory.get('/group/', data={'members': lead.pk})
    cl = ChangeList(request, Group, m.list_display,
                    m.list_display_links, m.list_filter, m.date_hierarchy,
                    m.search_fields, m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    cl.get_results(request)
    # There's only one Group instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_at_second_level_in_list_filter(self):
    """
    When using a ManyToMany in list_filter at the second level behind a
    ForeignKey, distinct() must be called and results shouldn't appear more
    than once.
    """
    lead = Musician.objects.create(name='Vox')
    band = Group.objects.create(name='The Hype')
    Concert.objects.create(name='Woodstock', group=band)
    Membership.objects.create(group=band, music=lead, role='lead voice')
    Membership.objects.create(group=band, music=lead, role='bass player')
    m = ConcertAdmin(Concert, custom_site)
    # Two-hop lookup: Concert -> group -> members.
    request = self.factory.get('/concert/', data={'group__members': lead.pk})
    cl = ChangeList(request, Concert, m.list_display,
                    m.list_display_links, m.list_filter, m.date_hierarchy,
                    m.search_fields, m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    cl.get_results(request)
    # There's only one Concert instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Model managed in the
    admin inherits from the one that defines the relationship.
    """
    lead = Musician.objects.create(name='John')
    four = Quartet.objects.create(name='The Beatles')
    Membership.objects.create(group=four, music=lead, role='lead voice')
    Membership.objects.create(group=four, music=lead, role='guitar player')
    m = QuartetAdmin(Quartet, custom_site)
    request = self.factory.get('/quartet/', data={'members': lead.pk})
    cl = ChangeList(request, Quartet, m.list_display,
                    m.list_display_links, m.list_filter, m.date_hierarchy,
                    m.search_fields, m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    cl.get_results(request)
    # There's only one Quartet instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Target of the relationship
    inherits from another.
    """
    lead = ChordsMusician.objects.create(name='Player A')
    three = ChordsBand.objects.create(name='The Chords Trio')
    Invitation.objects.create(band=three, player=lead, instrument='guitar')
    Invitation.objects.create(band=three, player=lead, instrument='bass')
    m = ChordsBandAdmin(ChordsBand, custom_site)
    request = self.factory.get('/chordsband/', data={'members': lead.pk})
    cl = ChangeList(request, ChordsBand, m.list_display,
                    m.list_display_links, m.list_filter, m.date_hierarchy,
                    m.search_fields, m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    cl.get_results(request)
    # There's only one ChordsBand instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
    """
    Regressions tests for #15819: If a field listed in list_filters
    is a non-unique related object, distinct() must be called.
    """
    parent = Parent.objects.create(name='Mary')
    # Two children with the same name
    Child.objects.create(parent=parent, name='Daniel')
    Child.objects.create(parent=parent, name='Daniel')
    m = ParentAdmin(Parent, custom_site)
    request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
    cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    # Make sure distinct() was called
    self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
    """
    Regressions tests for #15819: If a field listed in search_fields
    is a non-unique related object, distinct() must be called.
    """
    parent = Parent.objects.create(name='Mary')
    # Both child names match the 'daniel' search term below.
    Child.objects.create(parent=parent, name='Danielle')
    Child.objects.create(parent=parent, name='Daniel')
    m = ParentAdmin(Parent, custom_site)
    request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
    cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    # Make sure distinct() was called
    self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_many_to_many_at_second_level_in_search_fields(self):
    """
    When using a ManyToMany in search_fields at the second level behind a
    ForeignKey, distinct() must be called and results shouldn't appear more
    than once.
    """
    lead = Musician.objects.create(name='Vox')
    band = Group.objects.create(name='The Hype')
    Concert.objects.create(name='Woodstock', group=band)
    Membership.objects.create(group=band, music=lead, role='lead voice')
    Membership.objects.create(group=band, music=lead, role='bass player')
    m = ConcertAdmin(Concert, custom_site)
    request = self.factory.get('/concert/', data={SEARCH_VAR: 'vox'})
    cl = ChangeList(request, Concert, m.list_display,
                    m.list_display_links, m.list_filter, m.date_hierarchy,
                    m.search_fields, m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    # There's only one Concert instance
    self.assertEqual(cl.queryset.count(), 1)
def test_pagination(self):
    """
    Regression tests for #12893: Pagination in admins changelist doesn't
    use queryset set by modeladmin.
    """
    parent = Parent.objects.create(name='anything')
    for i in range(30):
        Child.objects.create(name='name %s' % i, parent=parent)
        Child.objects.create(name='filtered %s' % i, parent=parent)
    request = self.factory.get('/child/')
    # Test default queryset
    m = ChildAdmin(Child, custom_site)
    cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page, m.list_max_show_all,
                    m.list_editable, m)
    self.assertEqual(cl.queryset.count(), 60)
    self.assertEqual(cl.paginator.count, 60)
    self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
    # Test custom queryset: FilteredChildAdmin narrows the queryset, and
    # the paginator must paginate that narrowed set (30 rows, 3 pages).
    m = FilteredChildAdmin(Child, custom_site)
    cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page, m.list_max_show_all,
                    m.list_editable, m)
    self.assertEqual(cl.queryset.count(), 30)
    self.assertEqual(cl.paginator.count, 30)
    self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
def test_computed_list_display_localization(self):
    """
    Regression test for #13196: output of functions should be localized
    in the changelist.
    """
    User.objects.create_superuser(
        username='super', email='super@localhost', password='secret')
    self.client.login(username='super', password='secret')
    event = Event.objects.create(date=datetime.date.today())
    response = self.client.get(reverse('admin:admin_changelist_event_changelist'))
    # The localized rendering must appear; the raw str() form must not.
    self.assertContains(response, formats.localize(event.date))
    self.assertNotContains(response, six.text_type(event.date))
def test_dynamic_list_display(self):
    """
    Regression tests for #14206: dynamic list_display support.

    NOTE: register/unregister calls on custom_site are order-sensitive;
    the final register() restores the default ChildAdmin for other tests.
    """
    parent = Parent.objects.create(name='parent')
    for i in range(10):
        Child.objects.create(name='child %s' % i, parent=parent)
    user_noparents = self._create_superuser('noparents')
    user_parents = self._create_superuser('parents')
    # Test with user 'noparents'
    m = custom_site._registry[Child]
    request = self._mocked_authenticated_request('/child/', user_noparents)
    response = m.changelist_view(request)
    self.assertNotContains(response, 'Parent object')
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ['name', 'age'])
    self.assertEqual(list_display_links, ['name'])
    # Test with user 'parents'
    m = DynamicListDisplayChildAdmin(Child, custom_site)
    request = self._mocked_authenticated_request('/child/', user_parents)
    response = m.changelist_view(request)
    self.assertContains(response, 'Parent object')
    custom_site.unregister(Child)
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ('parent', 'name', 'age'))
    self.assertEqual(list_display_links, ['parent'])
    # Test default implementation
    custom_site.register(Child, ChildAdmin)
    m = custom_site._registry[Child]
    request = self._mocked_authenticated_request('/child/', user_noparents)
    response = m.changelist_view(request)
    self.assertContains(response, 'Parent object')
def test_show_all(self):
    """ALL_VAR ('show all') is honored only when the total object count
    is at or below list_max_show_all; otherwise results stay paginated."""
    parent = Parent.objects.create(name='anything')
    for i in range(30):
        Child.objects.create(name='name %s' % i, parent=parent)
        Child.objects.create(name='filtered %s' % i, parent=parent)
    # Add "show all" parameter to request
    request = self.factory.get('/child/', data={ALL_VAR: ''})
    # Test valid "show all" request (number of total objects is under max)
    m = ChildAdmin(Child, custom_site)
    # 200 is the max we'll pass to ChangeList
    cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page, 200,
                    m.list_editable, m)
    cl.get_results(request)
    self.assertEqual(len(cl.result_list), 60)
    # Test invalid "show all" request (number of total objects over max)
    # falls back to paginated pages
    m = ChildAdmin(Child, custom_site)
    # 30 is the max we'll pass to ChangeList for this test
    cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page, 30,
                    m.list_editable, m)
    cl.get_results(request)
    self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
    """
    Regression tests for #16257: dynamic list_display_links support.
    """
    parent = Parent.objects.create(name='parent')
    for i in range(1, 10):
        Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)
    m = DynamicListDisplayLinksChildAdmin(Child, custom_site)
    superuser = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/child/', superuser)
    response = m.changelist_view(request)
    # Each row links from its 'age' cell (age == id by construction).
    for i in range(1, 10):
        link = reverse('admin:admin_changelist_child_change', args=(i,))
        self.assertContains(response, '<a href="%s">%s</a>' % (link, i))
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ('parent', 'name', 'age'))
    self.assertEqual(list_display_links, ['age'])
def test_no_list_display_links(self):
    """#15185 -- Allow no links from the 'change list' view grid."""
    parent_obj = Parent.objects.create(name='parent')
    model_admin = NoListDisplayLinksParentAdmin(Parent, custom_site)
    admin_user = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/parent/', admin_user)
    response = model_admin.changelist_view(request)
    change_url = reverse('admin:admin_changelist_parent_change', args=(parent_obj.pk,))
    # No anchor pointing at the change page may appear in the grid.
    self.assertNotContains(response, '<a href="%s">' % change_url)
def test_tuple_list_display(self):
    """
    Regression test for #17128
    (ChangeList failing under Python 2.5 after r16319)
    """
    swallow = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
    swallow2 = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
    swallow_o2o = SwallowOneToOne.objects.create(swallow=swallow2)
    model_admin = SwallowAdmin(Swallow, custom_site)
    superuser = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/swallow/', superuser)
    response = model_admin.changelist_view(request)
    # just want to ensure it doesn't blow up during rendering
    self.assertContains(response, six.text_type(swallow.origin))
    self.assertContains(response, six.text_type(swallow.load))
    self.assertContains(response, six.text_type(swallow.speed))
    # Reverse one-to-one relations should work: '-' for the swallow
    # without a related object, the object's repr for the one with it.
    self.assertContains(response, '<td class="field-swallowonetoone">-</td>')
    self.assertContains(response, '<td class="field-swallowonetoone">%s</td>' % swallow_o2o)
def test_deterministic_order_for_unordered_model(self):
    """
    Ensure that the primary key is systematically used in the ordering of
    the changelist's results to guarantee a deterministic order, even
    when the Model doesn't have any default ordering defined.
    Refs #17198.
    """
    superuser = self._create_superuser('superuser')
    for counter in range(1, 51):
        UnorderedObject.objects.create(id=counter, bool=True)

    class UnorderedObjectAdmin(admin.ModelAdmin):
        list_per_page = 10

    def check_results_order(ascending=False):
        # Walk all 5 pages and verify ids are strictly sequential in the
        # expected direction (counter counts up or down accordingly).
        custom_site.register(UnorderedObject, UnorderedObjectAdmin)
        model_admin = UnorderedObjectAdmin(UnorderedObject, custom_site)
        counter = 0 if ascending else 51
        for page in range(0, 5):
            request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
            response = model_admin.changelist_view(request)
            for result in response.context_data['cl'].result_list:
                counter += 1 if ascending else -1
                self.assertEqual(result.id, counter)
        custom_site.unregister(UnorderedObject)

    # When no order is defined at all, everything is ordered by '-pk'.
    check_results_order()
    # When an order field is defined but multiple records have the same
    # value for that field, make sure everything gets ordered by -pk as well.
    UnorderedObjectAdmin.ordering = ['bool']
    check_results_order()
    # When order fields are defined, including the pk itself, use them.
    UnorderedObjectAdmin.ordering = ['bool', '-pk']
    check_results_order()
    UnorderedObjectAdmin.ordering = ['bool', 'pk']
    check_results_order(ascending=True)
    UnorderedObjectAdmin.ordering = ['-id', 'bool']
    check_results_order()
    UnorderedObjectAdmin.ordering = ['id', 'bool']
    check_results_order(ascending=True)
def test_deterministic_order_for_model_ordered_by_its_manager(self):
    """
    Ensure that the primary key is systematically used in the ordering of
    the changelist's results to guarantee a deterministic order, even
    when the Model has a manager that defines a default ordering.
    Refs #17198.
    """
    superuser = self._create_superuser('superuser')

    # ``number`` mirrors the id so the manager's default ordering and pk
    # ordering agree; ``bool`` is identical everywhere to force ties.
    for counter in range(1, 51):
        OrderedObject.objects.create(id=counter, bool=True, number=counter)

    class OrderedObjectAdmin(admin.ModelAdmin):
        list_per_page = 10

    def check_results_order(ascending=False):
        # Walk all five changelist pages and verify the ids come out
        # strictly sequential in the expected direction.
        custom_site.register(OrderedObject, OrderedObjectAdmin)
        model_admin = OrderedObjectAdmin(OrderedObject, custom_site)
        counter = 0 if ascending else 51
        for page in range(0, 5):
            request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
            response = model_admin.changelist_view(request)
            for result in response.context_data['cl'].result_list:
                counter += 1 if ascending else -1
                self.assertEqual(result.id, counter)
        custom_site.unregister(OrderedObject)

    # When no order is defined at all, use the model's default ordering (i.e. 'number')
    check_results_order(ascending=True)

    # When an order field is defined but multiple records have the same
    # value for that field, make sure everything gets ordered by -pk as well.
    OrderedObjectAdmin.ordering = ['bool']
    check_results_order()

    # When order fields are defined, including the pk itself, use them.
    OrderedObjectAdmin.ordering = ['bool', '-pk']
    check_results_order()
    OrderedObjectAdmin.ordering = ['bool', 'pk']
    check_results_order(ascending=True)
    OrderedObjectAdmin.ordering = ['-id', 'bool']
    check_results_order()
    OrderedObjectAdmin.ordering = ['id', 'bool']
    check_results_order(ascending=True)
def test_dynamic_list_filter(self):
    """
    Regression test for ticket #17646: ``list_filter`` may be computed
    dynamically per request by the ModelAdmin.
    """
    parent = Parent.objects.create(name='parent')
    for index in range(10):
        Child.objects.create(name='child %s' % index, parent=parent)

    # (username, list_filter the ChangeList should end up with)
    expectations = [
        ('noparents', ['name', 'age']),
        ('parents', ('parent', 'name', 'age')),
    ]
    # Create the users up front, in declaration order.
    users = {name: self._create_superuser(name) for name, _ in expectations}

    for username, expected_filters in expectations:
        model_admin = DynamicListFilterChildAdmin(Child, custom_site)
        request = self._mocked_authenticated_request('/child/', users[username])
        response = model_admin.changelist_view(request)
        self.assertEqual(response.context_data['cl'].list_filter, expected_filters)
def test_dynamic_search_fields(self):
    """``search_fields`` may be computed dynamically from the request."""
    user = self._create_superuser('child')
    model_admin = DynamicSearchFieldsChildAdmin(Child, custom_site)
    request = self._mocked_authenticated_request('/child/', user)
    response = model_admin.changelist_view(request)
    self.assertEqual(response.context_data['cl'].search_fields, ('name', 'age'))
def test_pagination_page_range(self):
    """
    Regression tests for ticket #15653: ensure the number of pages
    generated for changelist views are correct.
    """
    # instantiating and setting up ChangeList object
    m = GroupAdmin(Group, custom_site)
    request = self.factory.get('/group/')
    cl = ChangeList(request, Group, m.list_display,
                    m.list_display_links, m.list_filter, m.date_hierarchy,
                    m.search_fields, m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    per_page = cl.list_per_page = 10

    # Each case: (page being viewed, total object count, expected page
    # range — '.' is the ellipsis placeholder the template renders).
    for page_num, objects_count, expected_page_range in [
        (0, per_page, []),
        (0, per_page * 2, list(range(2))),
        (5, per_page * 11, list(range(11))),
        (5, per_page * 12, [0, 1, 2, 3, 4, 5, 6, 7, 8, '.', 10, 11]),
        (6, per_page * 12, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, 10, 11]),
        (6, per_page * 13, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, '.', 11, 12]),
    ]:
        # assuming we have exactly `objects_count` objects
        Group.objects.all().delete()
        for i in range(objects_count):
            Group.objects.create(name='test band')

        # setting page number and calculating page range
        cl.page_num = page_num
        cl.get_results(request)
        real_page_range = pagination(cl)['page_range']

        self.assertListEqual(
            expected_page_range,
            list(real_page_range),
        )
class AdminLogNodeTestCase(TestCase):
    """Tests for the {% get_admin_log %} template tag."""

    def test_get_admin_log_templatetag_custom_user(self):
        """
        Regression test for ticket #20088: admin log depends on User model
        having id field as primary key.

        The old implementation raised an AttributeError when trying to use
        the id field.
        """
        source = '{% load log %}{% get_admin_log 10 as admin_log for_user user %}'
        rendered = Template(source).render(Context({'user': CustomIdUser()}))
        # The tag only stores entries in the context; it renders nothing
        # itself, so the output is the empty string.
        self.assertEqual(rendered, '')

    def test_get_admin_log_templatetag_no_user(self):
        """
        The {% get_admin_log %} tag should work without specifying a user.
        """
        user = User(username='jondoe', password='secret', email='super@example.com')
        user.save()
        ct = ContentType.objects.get_for_model(User)
        LogEntry.objects.log_action(user.pk, ct.pk, user.pk, repr(user), 1)

        source = (
            '{% load log %}'
            '{% get_admin_log 100 as admin_log %}'
            '{% for entry in admin_log %}'
            '{{ entry|safe }}'
            '{% endfor %}'
        )
        self.assertEqual(Template(source).render(Context({})), 'Added "<User: jondoe>".')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF="admin_changelist.urls")
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
    """Browser-driven changelist tests (Firefox; see subclasses for other drivers)."""
    available_apps = ['admin_changelist'] + AdminSeleniumWebDriverTestCase.available_apps
    webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'

    def setUp(self):
        # Create the superuser directly with a precomputed SHA1 hash so the
        # test avoids running the password hasher at setup time.
        # password = "secret"
        User.objects.create(
            pk=100, username='super', first_name='Super', last_name='User', email='super@example.com',
            password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
            is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
            date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
        )

    def test_add_row_selection(self):
        """
        Ensure that the status line for selected rows gets updated correctly (#22038)
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get('%s%s' % (self.live_server_url,
                                    reverse('admin:auth_user_changelist')))

        form_id = '#changelist-form'

        # Test amount of rows in the Changelist
        rows = self.selenium.find_elements_by_css_selector(
            '%s #result_list tbody tr' % form_id)
        self.assertEqual(len(rows), 1)

        # Test current selection
        selection_indicator = self.selenium.find_element_by_css_selector(
            '%s .action-counter' % form_id)
        self.assertEqual(selection_indicator.text, "0 of 1 selected")

        # Select a row and check again
        row_selector = self.selenium.find_element_by_css_selector(
            '%s #result_list tbody tr:first-child .action-select' % form_id)
        row_selector.click()
        self.assertEqual(selection_indicator.text, "1 of 1 selected")
class SeleniumChromeTests(SeleniumFirefoxTests):
    """Re-run the inherited Selenium tests against the Chrome webdriver."""
    webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
    """Re-run the inherited Selenium tests against the Internet Explorer webdriver."""
    webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
| bsd-3-clause |
cytec/SickRage | lib/requests/packages/chardet/langcyrillicmodel.py | 2762 | 17725 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# KOI8-R language model
# Character Mapping Table:
# Byte -> frequency-order table for KOI8-R.  Each of the 256 byte values is
# mapped to the rank ("order") of the character it encodes; those orders are
# what RussianLangModel below is indexed with.  High sentinel values mark
# non-letters: 252 for ASCII digits, 253 for symbols, 255 for control bytes
# (with 254 at positions 10 and 13, i.e. LF/CR) — the same scheme is used by
# all six tables in this module.
KOI8R_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
# Same layout as KOI8R_CharToOrderMap, but for the windows-1251 encoding.
win1251_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
# Same layout as KOI8R_CharToOrderMap, but for the ISO-8859-5 (latin5) encoding.
latin5_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Same layout as KOI8R_CharToOrderMap, but for the MacCyrillic encoding.
macCyrillic_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
# Same layout as KOI8R_CharToOrderMap, but for the IBM855 (DOS) encoding.
IBM855_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
# Same layout as KOI8R_CharToOrderMap, but for the IBM866 (DOS) encoding.
IBM866_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
# Russian character-pair (bigram) model.  4096 entries — apparently a
# flattened 64x64 matrix indexed by the frequency orders (from the
# *_CharToOrderMap tables above) of two consecutive characters, with cell
# values 0-3 ranking how typical the pair is.  TODO(review): confirm the
# exact indexing convention against chardet's single-byte prober before
# relying on it.  Generated data — do not hand-edit.
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
# Each model bundles one of the byte->order maps above with the shared
# Russian bigram matrix.  'charsetName' is the label reported when the
# corresponding prober wins; 'mTypicalPositiveRatio' is the fraction of
# sequence pairs covered by the most frequent orders (see the comments
# preceding RussianLangModel).
Koi8rModel = {
    'charToOrderMap': KOI8R_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "KOI8-R"
}

Win1251CyrillicModel = {
    'charToOrderMap': win1251_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "windows-1251"
}

Latin5CyrillicModel = {
    'charToOrderMap': latin5_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "ISO-8859-5"
}

# NOTE: a stray C-style trailing semicolon used to follow this dict; it was
# a no-op in Python and has been removed for consistency with the siblings.
MacCyrillicModel = {
    'charToOrderMap': macCyrillic_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "MacCyrillic"
}

Ibm866Model = {
    'charToOrderMap': IBM866_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "IBM866"
}

Ibm855Model = {
    'charToOrderMap': IBM855_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "IBM855"
}
# flake8: noqa
| gpl-3.0 |
adamncasey/servo | components/script/dom/bindings/codegen/parser/WebIDL.py | 6 | 275842 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
""" A WebIDL parser. """
from ply import lex, yacc
import re
import os
import traceback
import math
import string
from collections import defaultdict
# Machinery
def parseInt(literal):
    """Parse a WebIDL integer literal.

    Supports an optional leading '-', '0x'/'0X' hexadecimal, a legacy
    leading-zero octal form, and plain decimal.  Returns a signed int.
    """
    s = literal
    sign = 1
    if s[0] == '-':
        sign = -1
        s = s[1:]

    # A leading zero (with more digits after it) selects hex or octal;
    # a bare "0" is just decimal zero.
    if s[0] == '0' and len(s) > 1:
        if s[1] in 'xX':
            base = 16
            s = s[2:]
        else:
            base = 8
            s = s[1:]
    else:
        base = 10

    return sign * int(s, base)
# Magic for creating enums
def M_add_class_attribs(attribs, start):
def foo(name, bases, dict_):
for v, k in enumerate(attribs):
dict_[k] = start + v
assert 'length' not in dict_
dict_['length'] = start + len(attribs)
return type(name, bases, dict_)
return foo
def enum(*names, **kw):
    """Create a read-only enum-like instance.

    Each name in ``names`` becomes an integer attribute; pass
    ``base=<existing enum instance>`` to extend it, in which case numbering
    continues from the base's ``length``.  Attribute assignment on the
    returned instance raises NotImplementedError.
    """
    if len(kw) == 1:
        base = kw['base'].__class__
        start = base.length
    else:
        assert len(kw) == 0
        base = object
        start = 0

    def _readonly_setattr(self, name, value):
        # Enum instances are immutable.
        raise NotImplementedError

    # Build the class by calling the metaclass factory directly instead of
    # relying on a ``__metaclass__`` class attribute: that hook is Python 2
    # only and is silently ignored on Python 3, which would yield an enum
    # with none of its named attributes.
    Foo = M_add_class_attribs(names, start)(
        'Foo', (base,), {'__setattr__': _readonly_setattr})
    return Foo()
class WebIDLError(Exception):
    """A parse or validation diagnostic (error or warning) carrying the
    source locations it refers to."""

    def __init__(self, message, locations, warning=False):
        self.message = message
        # Stringify eagerly so the locations survive even if the underlying
        # lexer state is discarded later.
        self.locations = [str(loc) for loc in locations]
        self.warning = warning

    def __str__(self):
        severity = 'warning' if self.warning else 'error'
        separator = ", " if self.locations else ""
        return "%s: %s%s%s" % (severity, self.message, separator,
                               "\n".join(self.locations))
class Location(object):
    """A position in an IDL source file.

    Holds the raw lexer position; ``resolve()`` lazily computes (and caches)
    the source line's text, the column, and the absolute line number.
    """

    def __init__(self, lexer, lineno, lexpos, filename):
        self._line = None  # cached line text, filled in by resolve()
        self._lineno = lineno
        self._lexpos = lexpos
        self._lexdata = lexer.lexdata
        self._file = filename if filename else "<unknown>"

    def __eq__(self, other):
        return self._lexpos == other._lexpos and self._file == other._file

    def filename(self):
        return self._file

    def resolve(self):
        if self._line:
            return

        data, pos = self._lexdata, self._lexpos
        line_start = data.rfind('\n', 0, pos) + 1
        # Only look at most 80 characters ahead for the end of the line.
        line_end = data.find('\n', pos, pos + 80)
        self._line = data[line_start:line_end] if line_end != -1 else data[line_start:]
        self._colno = pos - line_start

        # The stored line number counts from the start of lexdata; add the
        # newlines that precede this line to make it absolute.
        self._lineno += data.count('\n', 0, line_start)

    def get(self):
        self.resolve()
        return "%s line %s:%s" % (self._file, self._lineno, self._colno)

    def _pointerline(self):
        # An ASCII caret under the offending column.
        return " " * self._colno + "^"

    def __str__(self):
        self.resolve()
        return "%s line %s:%s\n%s\n%s" % (self._file, self._lineno, self._colno,
                                          self._line, self._pointerline())
class BuiltinLocation(object):
    """Synthetic location for objects that are built in rather than parsed
    from an IDL file; mirrors Location's interface."""

    def __init__(self, text):
        self.msg = text + "\n"

    def __eq__(self, other):
        return isinstance(other, BuiltinLocation) and self.msg == other.msg

    def filename(self):
        return '<builtin>'

    def resolve(self):
        # Nothing to resolve for a synthetic location.
        pass

    def get(self):
        return self.msg

    def __str__(self):
        return self.get()
# Data Model
class IDLObject(object):
    """Common base for everything the parser produces.

    Tracks a source location, a free-form user-data dict, a family of type
    predicates that subclasses override, and transitive file-dependency
    computation via getDeps().
    """

    def __init__(self, location):
        self.location = location
        self.userData = dict()

    def filename(self):
        return self.location.filename()

    # --- type predicates: subclasses override the ones that apply ---

    def isInterface(self):
        return False

    def isNamespace(self):
        return False

    def isEnum(self):
        return False

    def isCallback(self):
        return False

    def isType(self):
        return False

    def isDictionary(self):
        return False

    def isUnion(self):
        return False

    def isTypedef(self):
        return False

    # --- user data ---

    def getUserData(self, key, default):
        return self.userData.get(key, default)

    def setUserData(self, key, value):
        self.userData[key] = value

    # --- hooks subclasses must implement ---

    def addExtendedAttributes(self, attrs):
        assert False  # Override me!

    def handleExtendedAttribute(self, attr):
        assert False  # Override me!

    def _getDependentObjects(self):
        assert False  # Override me!

    def getDeps(self, visited=None):
        """Return the set of files this object (transitively) depends on.

        ``visited`` guards against infinite recursion through cyclic object
        graphs: anything already visited contributes nothing.  It defaults
        to None rather than set() so each top-level call gets a fresh set
        (a mutable default would be shared across invocations).
        """
        if visited is None:
            visited = set()
        if self in visited:
            return set()
        visited.add(self)

        deps = {self.filename()} if self.filename() != "<builtin>" else set()
        for dependency in self._getDependentObjects():
            deps |= dependency.getDeps(visited)
        return deps
class IDLScope(IDLObject):
def __init__(self, location, parentScope, identifier):
IDLObject.__init__(self, location)
self.parentScope = parentScope
if identifier:
assert isinstance(identifier, IDLIdentifier)
self._name = identifier
else:
self._name = None
self._dict = {}
self.globalNames = set()
# A mapping from global name to the set of global interfaces
# that have that global name.
self.globalNameMapping = defaultdict(set)
self.primaryGlobalAttr = None
self.primaryGlobalName = None
def __str__(self):
return self.QName()
def QName(self):
if self._name:
return self._name.QName() + "::"
return "::"
def ensureUnique(self, identifier, object):
"""
Ensure that there is at most one 'identifier' in scope ('self').
Note that object can be None. This occurs if we end up here for an
interface type we haven't seen yet.
"""
assert isinstance(identifier, IDLUnresolvedIdentifier)
assert not object or isinstance(object, IDLObjectWithIdentifier)
assert not object or object.identifier == identifier
if identifier.name in self._dict:
if not object:
return
# ensureUnique twice with the same object is not allowed
assert id(object) != id(self._dict[identifier.name])
replacement = self.resolveIdentifierConflict(self, identifier,
self._dict[identifier.name],
object)
self._dict[identifier.name] = replacement
return
assert object
self._dict[identifier.name] = object
def resolveIdentifierConflict(self, scope, identifier, originalObject, newObject):
if (isinstance(originalObject, IDLExternalInterface) and
isinstance(newObject, IDLExternalInterface) and
originalObject.identifier.name == newObject.identifier.name):
return originalObject
if (isinstance(originalObject, IDLExternalInterface) or
isinstance(newObject, IDLExternalInterface)):
raise WebIDLError(
"Name collision between "
"interface declarations for identifier '%s' at '%s' and '%s'"
% (identifier.name,
originalObject.location, newObject.location), [])
if (isinstance(originalObject, IDLDictionary) or
isinstance(newObject, IDLDictionary)):
raise WebIDLError(
"Name collision between dictionary declarations for "
"identifier '%s'.\n%s\n%s"
% (identifier.name,
originalObject.location, newObject.location), [])
# We do the merging of overloads here as opposed to in IDLInterface
# because we need to merge overloads of NamedConstructors and we need to
# detect conflicts in those across interfaces. See also the comment in
# IDLInterface.addExtendedAttributes for "NamedConstructor".
if (isinstance(originalObject, IDLMethod) and
isinstance(newObject, IDLMethod)):
return originalObject.addOverload(newObject)
# Default to throwing, derived classes can override.
conflictdesc = "\n\t%s at %s\n\t%s at %s" % (originalObject,
originalObject.location,
newObject,
newObject.location)
raise WebIDLError(
"Multiple unresolvable definitions of identifier '%s' in scope '%s%s"
% (identifier.name, str(self), conflictdesc), [])
def _lookupIdentifier(self, identifier):
return self._dict[identifier.name]
    def lookupIdentifier(self, identifier):
        """Look up an already-resolved identifier belonging to this scope."""
        assert isinstance(identifier, IDLIdentifier)
        assert identifier.scope == self
        return self._lookupIdentifier(identifier)
class IDLIdentifier(IDLObject):
    """An identifier that has been bound to a particular IDLScope."""

    def __init__(self, location, scope, name):
        IDLObject.__init__(self, location)
        self.name = name
        assert isinstance(scope, IDLScope)
        self.scope = scope

    def QName(self):
        # Fully qualified name: the owning scope's QName plus our own name.
        return "%s%s" % (self.scope.QName(), self.name)

    def __str__(self):
        return self.QName()

    def __hash__(self):
        # Hash and equality are both defined over the qualified name.
        return hash(self.QName())

    def __eq__(self, other):
        return self.QName() == other.QName()

    def object(self):
        # The scope maps resolved identifiers back to their objects.
        return self.scope.lookupIdentifier(self)
class IDLUnresolvedIdentifier(IDLObject):
    """An identifier not yet bound to a scope.

    Validates the raw name at construction time and strips the single
    leading underscore WebIDL uses to escape reserved words.
    """

    def __init__(self, location, name, allowDoubleUnderscore=False,
                 allowForbidden=False):
        IDLObject.__init__(self, location)

        assert len(name) > 0

        if name == "__noSuchMethod__":
            raise WebIDLError("__noSuchMethod__ is deprecated", [location])

        if name.startswith("__") and name != "__content" and not allowDoubleUnderscore:
            raise WebIDLError("Identifiers beginning with __ are reserved",
                              [location])

        if name.startswith("_") and not allowDoubleUnderscore:
            # A single leading "_" escapes a reserved word; drop it.
            name = name[1:]

        if name in ("constructor", "toString") and not allowForbidden:
            raise WebIDLError("Cannot use reserved identifier '%s'" % (name),
                              [location])

        self.name = name

    def __str__(self):
        return self.QName()

    def QName(self):
        return "<unresolved scope>::" + self.name

    def resolve(self, scope, object):
        """Bind this identifier into `scope`, returning an IDLIdentifier.

        When `object` is given it is the IDLObjectWithIdentifier being
        named; its .identifier attribute is updated to the resolved form.
        """
        assert isinstance(scope, IDLScope)
        assert not object or isinstance(object, IDLObjectWithIdentifier)
        assert not object or object.identifier == self

        scope.ensureUnique(self, object)

        identifier = IDLIdentifier(self.location, scope, self.name)
        if object:
            object.identifier = identifier
        return identifier

    def finish(self):
        # Unresolved identifiers must never survive to finish().
        assert False  # Should replace with a resolved identifier first.
class IDLObjectWithIdentifier(IDLObject):
    """An IDL object that carries a (possibly still unresolved) identifier."""

    def __init__(self, location, parentScope, identifier):
        IDLObject.__init__(self, location)

        assert isinstance(identifier, IDLUnresolvedIdentifier)

        self.identifier = identifier

        if parentScope:
            self.resolve(parentScope)

        self.treatNullAs = "Default"

    def resolve(self, parentScope):
        """Resolve our identifier within `parentScope`."""
        assert isinstance(parentScope, IDLScope)
        assert isinstance(self.identifier, IDLUnresolvedIdentifier)
        self.identifier.resolve(parentScope, self)

    def checkForStringHandlingExtendedAttributes(self, attrs,
                                                 isDictionaryMember=False,
                                                 isOptional=False):
        """
        A helper function to deal with TreatNullAs.  Returns the list
        of attrs it didn't handle itself.
        """
        assert isinstance(self, IDLArgument) or isinstance(self, IDLAttribute)
        unhandledAttrs = []
        for attr in attrs:
            if not attr.hasValue():
                unhandledAttrs.append(attr)
                continue

            identifier = attr.identifier()
            value = attr.value()
            if identifier != "TreatNullAs":
                unhandledAttrs.append(attr)
                continue

            # [TreatNullAs] validation: only non-nullable DOMString
            # arguments/attributes, never dictionary members, and only
            # the EmptyString behavior is supported.
            if not self.type.isDOMString() or self.type.nullable():
                raise WebIDLError("[TreatNullAs] is only allowed on "
                                  "arguments or attributes whose type is "
                                  "DOMString",
                                  [self.location])
            if isDictionaryMember:
                raise WebIDLError("[TreatNullAs] is not allowed for "
                                  "dictionary members", [self.location])
            if value != 'EmptyString':
                raise WebIDLError("[TreatNullAs] must take the identifier "
                                  "'EmptyString', not '%s'" % value,
                                  [self.location])
            self.treatNullAs = value

        return unhandledAttrs
class IDLObjectWithScope(IDLObjectWithIdentifier, IDLScope):
    """An object that both has an identifier and is itself a scope
    (e.g. an interface, which is named and contains named members)."""
    def __init__(self, location, parentScope, identifier):
        assert isinstance(identifier, IDLUnresolvedIdentifier)
        # Resolve our identifier in the parent scope first; the resolved
        # identifier then becomes the root of our own scope.
        IDLObjectWithIdentifier.__init__(self, location, parentScope, identifier)
        IDLScope.__init__(self, location, parentScope, self.identifier)
class IDLIdentifierPlaceholder(IDLObjectWithIdentifier):
    """A forward reference to a type by name, resolved during finish()."""

    def __init__(self, location, identifier):
        assert isinstance(identifier, IDLUnresolvedIdentifier)
        IDLObjectWithIdentifier.__init__(self, location, None, identifier)

    def finish(self, scope):
        """Resolve this placeholder against `scope`.

        Returns the object registered under the identifier; raises
        WebIDLError when the name is unknown in the scope.
        """
        try:
            scope._lookupIdentifier(self.identifier)
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt and
            # SystemExit still propagate; an unknown name surfaces here
            # as a KeyError from the scope's dict lookup.
            raise WebIDLError("Unresolved type '%s'." % self.identifier,
                              [self.location])

        obj = self.identifier.resolve(scope, None)
        return scope.lookupIdentifier(obj)
class IDLExposureMixins():
    """Mixin tracking the global scopes in which a construct is exposed."""

    def __init__(self, location):
        # _exposureGlobalNames are the global names listed in our [Exposed]
        # extended attribute. exposureSet is the exposure set as defined in the
        # Web IDL spec: it contains interface names.
        self._exposureGlobalNames = set()
        self.exposureSet = set()
        self._location = location
        self._globalScope = None

    def finish(self, scope):
        """Validate [Exposed] values and compute the final exposure set."""
        assert scope.parentScope is None
        self._globalScope = scope

        # Verify that our [Exposed] value, if any, makes sense.
        for globalName in self._exposureGlobalNames:
            if globalName not in scope.globalNames:
                raise WebIDLError("Unknown [Exposed] value %s" % globalName,
                                  [self._location])

        if not self._exposureGlobalNames:
            # No [Exposed] given: default to the primary global.
            self._exposureGlobalNames.add(scope.primaryGlobalName)

        globalNameSetToExposureSet(scope, self._exposureGlobalNames,
                                   self.exposureSet)

    def isExposedInWindow(self):
        return 'Window' in self.exposureSet

    def isExposedInSystemGlobals(self):
        return 'BackstagePass' in self.exposureSet

    def isExposedOnMainThread(self):
        return self.isExposedInWindow() or self.isExposedInSystemGlobals()

    def isExposedInAnyWorker(self):
        return bool(self.getWorkerExposureSet())

    def isExposedInWorkerDebugger(self):
        return bool(self.getWorkerDebuggerExposureSet())

    def isExposedInAnyWorklet(self):
        return bool(self.getWorkletExposureSet())

    def isExposedInSomeButNotAllWorkers(self):
        """
        Returns true if the Exposed extended attribute for this interface
        exposes it in some worker globals but not others. The return value does
        not depend on whether the interface is exposed in Window or System
        globals.
        """
        if not self.isExposedInAnyWorker():
            return False
        workerScopes = self.parentScope.globalNameMapping["Worker"]
        return len(workerScopes.difference(self.exposureSet)) > 0

    def getWorkerExposureSet(self):
        return self._globalScope.globalNameMapping["Worker"] & self.exposureSet

    def getWorkletExposureSet(self):
        return self._globalScope.globalNameMapping["Worklet"] & self.exposureSet

    def getWorkerDebuggerExposureSet(self):
        debuggerScopes = self._globalScope.globalNameMapping["WorkerDebugger"]
        return debuggerScopes & self.exposureSet
class IDLExternalInterface(IDLObjectWithIdentifier, IDLExposureMixins):
    """An "interface Foo;" forward declaration — the definition lives
    outside this WebIDL file."""

    def __init__(self, location, parentScope, identifier):
        assert isinstance(identifier, IDLUnresolvedIdentifier)
        assert isinstance(parentScope, IDLScope)
        self.parent = None
        IDLObjectWithIdentifier.__init__(self, location, parentScope, identifier)
        IDLExposureMixins.__init__(self, location)
        IDLObjectWithIdentifier.resolve(self, parentScope)

    def finish(self, scope):
        IDLExposureMixins.finish(self, scope)

    def validate(self):
        pass

    def isExternal(self):
        return True

    def isInterface(self):
        return True

    def isIteratorInterface(self):
        return False

    def isConsequential(self):
        return False

    def addExtendedAttributes(self, attrs):
        # External interfaces may not carry extended attributes at all.
        if attrs:
            raise WebIDLError("There are no extended attributes that are "
                              "allowed on external interfaces",
                              [attrs[0].location, self.location])

    def resolve(self, parentScope):
        # Already resolved in __init__; nothing further to do.
        pass

    def getJSImplementation(self):
        return None

    def isJSImplemented(self):
        return False

    def hasProbablyShortLivingWrapper(self):
        return False

    def isNavigatorProperty(self):
        return False

    def _getDependentObjects(self):
        return set()
class IDLPartialInterfaceOrNamespace(IDLObject):
    """A "partial interface" / "partial namespace" declaration.

    Holds the partial declaration's members and extended attributes
    until finish(), when the registered non-partial declaration merges
    them in.
    """
    def __init__(self, location, name, members, nonPartialInterfaceOrNamespace):
        assert isinstance(name, IDLUnresolvedIdentifier)
        IDLObject.__init__(self, location)
        self.identifier = name
        self.members = members
        # propagatedExtendedAttrs are the ones that should get
        # propagated to our non-partial interface.
        self.propagatedExtendedAttrs = []
        self._haveSecureContextExtendedAttribute = False
        self._nonPartialInterfaceOrNamespace = nonPartialInterfaceOrNamespace
        self._finished = False
        # Register ourselves so the non-partial declaration merges us in
        # during its own finish().
        nonPartialInterfaceOrNamespace.addPartialInterface(self)
    def addExtendedAttributes(self, attrs):
        """Handle extended attributes on the partial declaration.

        [Constructor]/[NamedConstructor] are queued for propagation to
        the non-partial declaration; [SecureContext] and [Exposed] are
        pushed down onto each member; anything else is an error.
        """
        for attr in attrs:
            identifier = attr.identifier()
            if identifier in ["Constructor", "NamedConstructor"]:
                self.propagatedExtendedAttrs.append(attr)
            elif identifier == "SecureContext":
                self._haveSecureContextExtendedAttribute = True
                # This gets propagated to all our members.
                for member in self.members:
                    if member.getExtendedAttribute("SecureContext"):
                        raise WebIDLError("[SecureContext] specified on both a "
                                          "partial interface member and on the "
                                          "partial interface itself",
                                          [member.location, attr.location])
                    member.addExtendedAttributes([attr])
            elif identifier == "Exposed":
                # This just gets propagated to all our members.
                for member in self.members:
                    if len(member._exposureGlobalNames) != 0:
                        raise WebIDLError("[Exposed] specified on both a "
                                          "partial interface member and on the "
                                          "partial interface itself",
                                          [member.location, attr.location])
                    member.addExtendedAttributes([attr])
            else:
                raise WebIDLError("Unknown extended attribute %s on partial "
                                  "interface" % identifier,
                                  [attr.location])
    def finish(self, scope):
        """Propagate an inherited [SecureContext] from the non-partial
        declaration onto our members, then finish that declaration.
        Idempotent."""
        if self._finished:
            return
        self._finished = True
        if (not self._haveSecureContextExtendedAttribute and
            self._nonPartialInterfaceOrNamespace.getExtendedAttribute("SecureContext")):
            # This gets propagated to all our members.
            for member in self.members:
                if member.getExtendedAttribute("SecureContext"):
                    raise WebIDLError("[SecureContext] specified on both a "
                                      "partial interface member and on the "
                                      "non-partial interface",
                                      [member.location,
                                       self._nonPartialInterfaceOrNamespace.location])
                member.addExtendedAttributes(
                    [IDLExtendedAttribute(self._nonPartialInterfaceOrNamespace.location,
                                          ("SecureContext",))])
        # Need to make sure our non-partial interface or namespace gets
        # finished so it can report cases when we only have partial
        # interfaces/namespaces.
        self._nonPartialInterfaceOrNamespace.finish(scope)
    def validate(self):
        # Validation happens on the merged non-partial declaration.
        pass
def convertExposedAttrToGlobalNameSet(exposedAttr, targetSet):
    """Fill `targetSet` with the global name(s) of an [Exposed] attribute.

    `targetSet` must start out empty.  [Exposed=Foo] contributes a single
    name; [Exposed=(Foo,Bar)] contributes every listed name.
    """
    assert len(targetSet) == 0
    if not exposedAttr.hasValue():
        # Multi-valued form: [Exposed=(A, B, ...)].
        assert exposedAttr.hasArgs()
        targetSet.update(exposedAttr.args())
    else:
        targetSet.add(exposedAttr.value())
def globalNameSetToExposureSet(globalScope, nameSet, exposureSet):
    """Expand global names into the interface names they stand for.

    Each name in `nameSet` maps, via globalScope.globalNameMapping, to a
    set of interface names; their union accumulates into `exposureSet`.
    """
    mapping = globalScope.globalNameMapping
    for globalName in nameSet:
        exposureSet.update(mapping[globalName])
class IDLInterfaceOrNamespace(IDLObjectWithScope, IDLExposureMixins):
    def __init__(self, location, parentScope, name, parent, members,
                 isKnownNonPartial):
        """Set up bookkeeping; real resolution happens in finish().

        `parent` and `members` may only be supplied for a known
        non-partial declaration; otherwise they arrive later via
        setNonPartial() (defined elsewhere in the file).
        """
        assert isinstance(parentScope, IDLScope)
        assert isinstance(name, IDLUnresolvedIdentifier)
        assert isKnownNonPartial or not parent
        assert isKnownNonPartial or len(members) == 0
        # Resolved parent interface; filled in during finish().
        self.parent = None
        self._callback = False
        self._finished = False
        self.members = []
        # The single maplike/setlike/iterable member, if any (checked in
        # finish()).
        self.maplikeOrSetlikeOrIterable = None
        # Partial declarations registered via addPartialInterface.
        self._partialInterfaces = []
        self._extendedAttrDict = {}
        # namedConstructors needs deterministic ordering because bindings code
        # outputs the constructs in the order that namedConstructors enumerates
        # them.
        self.namedConstructors = list()
        self.implementedInterfaces = set()
        self._consequential = False
        self._isKnownNonPartial = False
        # self.interfacesBasedOnSelf is the set of interfaces that inherit from
        # self or have self as a consequential interface, including self itself.
        # Used for distinguishability checking.
        self.interfacesBasedOnSelf = set([self])
        # self.interfacesImplementingSelf is the set of interfaces that directly
        # have self as a consequential interface
        self.interfacesImplementingSelf = set()
        self._hasChildInterfaces = False
        self._isOnGlobalProtoChain = False
        # Tracking of the number of reserved slots we need for our
        # members and those of ancestor interfaces.
        self.totalMembersInSlots = 0
        # Tracking of the number of own own members we have in slots
        self._ownMembersInSlots = 0
        # If this is an iterator interface, we need to know what iterable
        # interface we're iterating for in order to get its nativeType.
        self.iterableInterface = None
        IDLObjectWithScope.__init__(self, location, parentScope, name)
        IDLExposureMixins.__init__(self, location)
        if isKnownNonPartial:
            self.setNonPartial(location, parent, members)
def ctor(self):
identifier = IDLUnresolvedIdentifier(self.location, "constructor",
allowForbidden=True)
try:
return self._lookupIdentifier(identifier)
except:
return None
def isIterable(self):
return (self.maplikeOrSetlikeOrIterable and
self.maplikeOrSetlikeOrIterable.isIterable())
    def isIteratorInterface(self):
        # iterableInterface is the iterable interface we iterate for (see
        # __init__); only iterator interfaces have one.
        return self.iterableInterface is not None
def resolveIdentifierConflict(self, scope, identifier, originalObject, newObject):
assert isinstance(scope, IDLScope)
assert isinstance(originalObject, IDLInterfaceMember)
assert isinstance(newObject, IDLInterfaceMember)
retval = IDLScope.resolveIdentifierConflict(self, scope, identifier,
originalObject, newObject)
# Might be a ctor, which isn't in self.members
if newObject in self.members:
self.members.remove(newObject)
return retval
    def finish(self, scope):
        """Resolve this interface/namespace and enforce structural rules.

        In order: merges partial declarations, expands maplike/setlike/
        iterable members, defaults member exposure, resolves the parent
        and implemented interfaces, finishes members, imports members
        from consequential interfaces, handles [Unforgeable], assigns
        reserved slot indices, and checks special-operation and [Global]
        constraints.  Idempotent: later calls are no-ops.
        """
        if self._finished:
            return
        self._finished = True
        if not self._isKnownNonPartial:
            raise WebIDLError("Interface %s does not have a non-partial "
                              "declaration" % self.identifier.name,
                              [self.location])
        IDLExposureMixins.finish(self, scope)
        # Now go ahead and merge in our partial interfaces.
        for partial in self._partialInterfaces:
            partial.finish(scope)
            self.addExtendedAttributes(partial.propagatedExtendedAttrs)
            self.members.extend(partial.members)
        # Generate maplike/setlike interface members. Since generated members
        # need to be treated like regular interface members, do this before
        # things like exposure setting.
        for member in self.members:
            if member.isMaplikeOrSetlikeOrIterable():
                # Check that we only have one interface declaration (currently
                # there can only be one maplike/setlike declaration per
                # interface)
                if self.maplikeOrSetlikeOrIterable:
                    raise WebIDLError("%s declaration used on "
                                      "interface that already has %s "
                                      "declaration" %
                                      (member.maplikeOrSetlikeOrIterableType,
                                       self.maplikeOrSetlikeOrIterable.maplikeOrSetlikeOrIterableType),
                                      [self.maplikeOrSetlikeOrIterable.location,
                                       member.location])
                self.maplikeOrSetlikeOrIterable = member
                # If we've got a maplike or setlike declaration, we'll be building all of
                # our required methods in Codegen. Generate members now.
                self.maplikeOrSetlikeOrIterable.expand(self.members, self.isJSImplemented())
        # Now that we've merged in our partial interfaces, set the
        # _exposureGlobalNames on any members that don't have it set yet. Note
        # that any partial interfaces that had [Exposed] set have already set up
        # _exposureGlobalNames on all the members coming from them, so this is
        # just implementing the "members default to interface that defined them"
        # and "partial interfaces default to interface they're a partial for"
        # rules from the spec.
        for m in self.members:
            # If m, or the partial interface m came from, had [Exposed]
            # specified, it already has a nonempty exposure global names set.
            if len(m._exposureGlobalNames) == 0:
                m._exposureGlobalNames.update(self._exposureGlobalNames)
        # Resolve the parent placeholder into the actual interface object.
        assert not self.parent or isinstance(self.parent, IDLIdentifierPlaceholder)
        parent = self.parent.finish(scope) if self.parent else None
        if parent and isinstance(parent, IDLExternalInterface):
            raise WebIDLError("%s inherits from %s which does not have "
                              "a definition" %
                              (self.identifier.name,
                               self.parent.identifier.name),
                              [self.location])
        assert not parent or isinstance(parent, IDLInterface)
        self.parent = parent
        assert iter(self.members)
        if self.isNamespace():
            assert not self.parent
            for m in self.members:
                if m.isAttr() or m.isMethod():
                    if m.isStatic():
                        raise WebIDLError("Don't mark things explicitly static "
                                          "in namespaces",
                                          [self.location, m.location])
                    # Just mark all our methods/attributes as static. The other
                    # option is to duplicate the relevant InterfaceMembers
                    # production bits but modified to produce static stuff to
                    # start with, but that sounds annoying.
                    m.forceStatic()
        if self.parent:
            self.parent.finish(scope)
            self.parent._hasChildInterfaces = True
            # Our slot numbering continues where the parent's left off.
            self.totalMembersInSlots = self.parent.totalMembersInSlots
            # Interfaces with [Global] or [PrimaryGlobal] must not
            # have anything inherit from them
            if (self.parent.getExtendedAttribute("Global") or
                self.parent.getExtendedAttribute("PrimaryGlobal")):
                # Note: This is not a self.parent.isOnGlobalProtoChain() check
                # because ancestors of a [Global] interface can have other
                # descendants.
                raise WebIDLError("[Global] interface has another interface "
                                  "inheriting from it",
                                  [self.location, self.parent.location])
            # Make sure that we're not exposed in places where our parent is not
            if not self.exposureSet.issubset(self.parent.exposureSet):
                raise WebIDLError("Interface %s is exposed in globals where its "
                                  "parent interface %s is not exposed." %
                                  (self.identifier.name,
                                   self.parent.identifier.name),
                                  [self.location, self.parent.location])
            # Callbacks must not inherit from non-callbacks or inherit from
            # anything that has consequential interfaces.
            # XXXbz Can non-callbacks inherit from callbacks?  Spec issue pending.
            # XXXbz Can callbacks have consequential interfaces?  Spec issue pending
            if self.isCallback():
                if not self.parent.isCallback():
                    raise WebIDLError("Callback interface %s inheriting from "
                                      "non-callback interface %s" %
                                      (self.identifier.name,
                                       self.parent.identifier.name),
                                      [self.location, self.parent.location])
            elif self.parent.isCallback():
                raise WebIDLError("Non-callback interface %s inheriting from "
                                  "callback interface %s" %
                                  (self.identifier.name,
                                   self.parent.identifier.name),
                                  [self.location, self.parent.location])
            # Interfaces which have interface objects can't inherit
            # from [NoInterfaceObject] interfaces.
            if (self.parent.getExtendedAttribute("NoInterfaceObject") and
                not self.getExtendedAttribute("NoInterfaceObject")):
                raise WebIDLError("Interface %s does not have "
                                  "[NoInterfaceObject] but inherits from "
                                  "interface %s which does" %
                                  (self.identifier.name,
                                   self.parent.identifier.name),
                                  [self.location, self.parent.location])
            # Interfaces that are not [SecureContext] can't inherit
            # from [SecureContext] interfaces.
            if (self.parent.getExtendedAttribute("SecureContext") and
                not self.getExtendedAttribute("SecureContext")):
                raise WebIDLError("Interface %s does not have "
                                  "[SecureContext] but inherits from "
                                  "interface %s which does" %
                                  (self.identifier.name,
                                   self.parent.identifier.name),
                                  [self.location, self.parent.location])
        for iface in self.implementedInterfaces:
            iface.finish(scope)
        cycleInGraph = self.findInterfaceLoopPoint(self)
        if cycleInGraph:
            raise WebIDLError("Interface %s has itself as ancestor or "
                              "implemented interface" % self.identifier.name,
                              [self.location, cycleInGraph.location])
        if self.isCallback():
            # "implements" should have made sure we have no
            # consequential interfaces.
            assert len(self.getConsequentialInterfaces()) == 0
            # And that we're not consequential.
            assert not self.isConsequential()
        # Now resolve() and finish() our members before importing the
        # ones from our implemented interfaces.
        # resolve() will modify self.members, so we need to iterate
        # over a copy of the member list here.
        for member in list(self.members):
            member.resolve(self)
        for member in self.members:
            member.finish(scope)
        # Now that we've finished our members, which has updated their exposure
        # sets, make sure they aren't exposed in places where we are not.
        for member in self.members:
            if not member.exposureSet.issubset(self.exposureSet):
                raise WebIDLError("Interface member has larger exposure set "
                                  "than the interface itself",
                                  [member.location, self.location])
        # Constructors default their exposure to ours and get finished too.
        ctor = self.ctor()
        if ctor is not None:
            assert len(ctor._exposureGlobalNames) == 0
            ctor._exposureGlobalNames.update(self._exposureGlobalNames)
            ctor.finish(scope)
        for ctor in self.namedConstructors:
            assert len(ctor._exposureGlobalNames) == 0
            ctor._exposureGlobalNames.update(self._exposureGlobalNames)
            ctor.finish(scope)
        # Make a copy of our member list, so things that implement us
        # can get those without all the stuff we implement ourselves
        # admixed.
        self.originalMembers = list(self.members)
        # Import everything from our consequential interfaces into
        # self.members. Sort our consequential interfaces by name
        # just so we have a consistent order.
        for iface in sorted(self.getConsequentialInterfaces(),
                            cmp=cmp,
                            key=lambda x: x.identifier.name):
            # Flag the interface as being someone's consequential interface
            iface.setIsConsequentialInterfaceOf(self)
            # Verify that we're not exposed somewhere where iface is not exposed
            if not self.exposureSet.issubset(iface.exposureSet):
                raise WebIDLError("Interface %s is exposed in globals where its "
                                  "consequential interface %s is not exposed." %
                                  (self.identifier.name, iface.identifier.name),
                                  [self.location, iface.location])
            # If we have a maplike or setlike, and the consequential interface
            # also does, throw an error.
            if iface.maplikeOrSetlikeOrIterable and self.maplikeOrSetlikeOrIterable:
                raise WebIDLError("Maplike/setlike/iterable interface %s cannot have "
                                  "maplike/setlike/iterable interface %s as a "
                                  "consequential interface" %
                                  (self.identifier.name,
                                   iface.identifier.name),
                                  [self.maplikeOrSetlikeOrIterable.location,
                                   iface.maplikeOrSetlikeOrIterable.location])
            additionalMembers = iface.originalMembers
            for additionalMember in additionalMembers:
                for member in self.members:
                    if additionalMember.identifier.name == member.identifier.name:
                        raise WebIDLError(
                            "Multiple definitions of %s on %s coming from 'implements' statements" %
                            (member.identifier.name, self),
                            [additionalMember.location, member.location])
            self.members.extend(additionalMembers)
            iface.interfacesImplementingSelf.add(self)
        for ancestor in self.getInheritedInterfaces():
            ancestor.interfacesBasedOnSelf.add(self)
            if (ancestor.maplikeOrSetlikeOrIterable is not None and
                self.maplikeOrSetlikeOrIterable is not None):
                raise WebIDLError("Cannot have maplike/setlike on %s that "
                                  "inherits %s, which is already "
                                  "maplike/setlike" %
                                  (self.identifier.name,
                                   ancestor.identifier.name),
                                  [self.maplikeOrSetlikeOrIterable.location,
                                   ancestor.maplikeOrSetlikeOrIterable.location])
            for ancestorConsequential in ancestor.getConsequentialInterfaces():
                ancestorConsequential.interfacesBasedOnSelf.add(self)
        # Deal with interfaces marked [Unforgeable], now that we have our full
        # member list, except unforgeables pulled in from parents. We want to
        # do this before we set "originatingInterface" on our unforgeable
        # members.
        if self.getExtendedAttribute("Unforgeable"):
            # Check that the interface already has all the things the
            # spec would otherwise require us to synthesize and is
            # missing the ones we plan to synthesize.
            if not any(m.isMethod() and m.isStringifier() for m in self.members):
                raise WebIDLError("Unforgeable interface %s does not have a "
                                  "stringifier" % self.identifier.name,
                                  [self.location])
            for m in self.members:
                if ((m.isMethod() and m.isJsonifier()) or
                    m.identifier.name == "toJSON"):
                    raise WebIDLError("Unforgeable interface %s has a "
                                      "jsonifier so we won't be able to add "
                                      "one ourselves" % self.identifier.name,
                                      [self.location, m.location])
                if m.identifier.name == "valueOf" and not m.isStatic():
                    raise WebIDLError("Unforgeable interface %s has a valueOf "
                                      "member so we won't be able to add one "
                                      "ourselves" % self.identifier.name,
                                      [self.location, m.location])
        for member in self.members:
            if ((member.isAttr() or member.isMethod()) and
                member.isUnforgeable() and
                not hasattr(member, "originatingInterface")):
                member.originatingInterface = self
        # Compute slot indices for our members before we pull in unforgeable
        # members from our parent. Also, maplike/setlike declarations get a
        # slot to hold their backing object.
        for member in self.members:
            if ((member.isAttr() and
                 (member.getExtendedAttribute("StoreInSlot") or
                  member.getExtendedAttribute("Cached"))) or
                member.isMaplikeOrSetlike()):
                if member.slotIndices is None:
                    member.slotIndices = dict()
                member.slotIndices[self.identifier.name] = self.totalMembersInSlots
                self.totalMembersInSlots += 1
                if member.getExtendedAttribute("StoreInSlot"):
                    self._ownMembersInSlots += 1
        if self.parent:
            # Make sure we don't shadow any of the [Unforgeable] attributes on
            # our ancestor interfaces. We don't have to worry about
            # consequential interfaces here, because those have already been
            # imported into the relevant .members lists. And we don't have to
            # worry about anything other than our parent, because it has already
            # imported its ancestors unforgeable attributes into its member
            # list.
            for unforgeableMember in (member for member in self.parent.members if
                                      (member.isAttr() or member.isMethod()) and
                                      member.isUnforgeable()):
                shadows = [m for m in self.members if
                           (m.isAttr() or m.isMethod()) and
                           not m.isStatic() and
                           m.identifier.name == unforgeableMember.identifier.name]
                if len(shadows) != 0:
                    locs = [unforgeableMember.location] + [s.location for s
                                                           in shadows]
                    # NOTE(review): this message uses `ancestor.identifier.name`,
                    # a leftover loop variable from the getInheritedInterfaces
                    # loop above — presumably `self.parent` was intended; it
                    # would NameError if no ancestor loop ran. Left as-is.
                    raise WebIDLError("Interface %s shadows [Unforgeable] "
                                      "members of %s" %
                                      (self.identifier.name,
                                       ancestor.identifier.name),
                                      locs)
                # And now just stick it in our members, since we won't be
                # inheriting this down the proto chain. If we really cared we
                # could try to do something where we set up the unforgeable
                # attributes/methods of ancestor interfaces, with their
                # corresponding getters, on our interface, but that gets pretty
                # complicated and seems unnecessary.
                self.members.append(unforgeableMember)
        # At this point, we have all of our members. If the current interface
        # uses maplike/setlike, check for collisions anywhere in the current
        # interface or higher in the inheritance chain.
        if self.maplikeOrSetlikeOrIterable:
            testInterface = self
            isAncestor = False
            while testInterface:
                self.maplikeOrSetlikeOrIterable.checkCollisions(testInterface.members,
                                                                isAncestor)
                isAncestor = True
                testInterface = testInterface.parent
        # Ensure that there's at most one of each {named,indexed}
        # {getter,setter,creator,deleter}, at most one stringifier,
        # and at most one legacycaller. Note that this last is not
        # quite per spec, but in practice no one overloads
        # legacycallers. Also note that in practice we disallow
        # indexed deleters, but it simplifies some other code to
        # treat deleter analogously to getter/setter/creator by
        # prefixing it with "named".
        specialMembersSeen = {}
        for member in self.members:
            if not member.isMethod():
                continue
            if member.isGetter():
                memberType = "getters"
            elif member.isSetter():
                memberType = "setters"
            elif member.isCreator():
                memberType = "creators"
            elif member.isDeleter():
                memberType = "deleters"
            elif member.isStringifier():
                memberType = "stringifiers"
            elif member.isJsonifier():
                memberType = "jsonifiers"
            elif member.isLegacycaller():
                memberType = "legacycallers"
            else:
                continue
            if (memberType != "stringifiers" and memberType != "legacycallers" and
                memberType != "jsonifiers"):
                if member.isNamed():
                    memberType = "named " + memberType
                else:
                    assert member.isIndexed()
                    memberType = "indexed " + memberType
            if memberType in specialMembersSeen:
                raise WebIDLError("Multiple " + memberType + " on %s" % (self),
                                  [self.location,
                                   specialMembersSeen[memberType].location,
                                   member.location])
            specialMembersSeen[memberType] = member
        if self.getExtendedAttribute("LegacyUnenumerableNamedProperties"):
            # Check that we have a named getter.
            if "named getters" not in specialMembersSeen:
                raise WebIDLError(
                    "Interface with [LegacyUnenumerableNamedProperties] does "
                    "not have a named getter",
                    [self.location])
            ancestor = self.parent
            while ancestor:
                if ancestor.getExtendedAttribute("LegacyUnenumerableNamedProperties"):
                    raise WebIDLError(
                        "Interface with [LegacyUnenumerableNamedProperties] "
                        "inherits from another interface with "
                        "[LegacyUnenumerableNamedProperties]",
                        [self.location, ancestor.location])
                ancestor = ancestor.parent
        if self._isOnGlobalProtoChain:
            # Make sure we have no named setters, creators, or deleters
            for memberType in ["setter", "creator", "deleter"]:
                memberId = "named " + memberType + "s"
                if memberId in specialMembersSeen:
                    raise WebIDLError("Interface with [Global] has a named %s" %
                                      memberType,
                                      [self.location,
                                       specialMembersSeen[memberId].location])
            # Make sure we're not [OverrideBuiltins]
            if self.getExtendedAttribute("OverrideBuiltins"):
                raise WebIDLError("Interface with [Global] also has "
                                  "[OverrideBuiltins]",
                                  [self.location])
            # Mark all of our ancestors as being on the global's proto chain too
            parent = self.parent
            while parent:
                # Must not inherit from an interface with [OverrideBuiltins]
                if parent.getExtendedAttribute("OverrideBuiltins"):
                    raise WebIDLError("Interface with [Global] inherits from "
                                      "interface with [OverrideBuiltins]",
                                      [self.location, parent.location])
                parent._isOnGlobalProtoChain = True
                parent = parent.parent
    def validate(self):
        """Final consistency checks, run after all finish() passes.

        Checks [Unforgeable] constraints, per-member validity, PutForwards
        chains/cycles, [Alias] rules, conditional-exposure rules, and the
        value/pair-iterator pairing with indexed getters and "length".
        """
        # We don't support consequential unforgeable interfaces.  Need to check
        # this here, because in finish() an interface might not know yet that
        # it's consequential.
        if self.getExtendedAttribute("Unforgeable") and self.isConsequential():
            raise WebIDLError(
                "%s is an unforgeable consequential interface" %
                self.identifier.name,
                [self.location] +
                list(i.location for i in
                     (self.interfacesBasedOnSelf - {self})))
        # We also don't support inheriting from unforgeable interfaces.
        if self.getExtendedAttribute("Unforgeable") and self.hasChildInterfaces():
            locations = ([self.location] +
                         list(i.location for i in
                              self.interfacesBasedOnSelf if i.parent == self))
            raise WebIDLError("%s is an unforgeable ancestor interface" %
                              self.identifier.name,
                              locations)
        ctor = self.ctor()
        if ctor is not None:
            ctor.validate()
        for namedCtor in self.namedConstructors:
            namedCtor.validate()
        indexedGetter = None
        hasLengthAttribute = False
        for member in self.members:
            member.validate()
            if self.isCallback() and member.getExtendedAttribute("Replaceable"):
                raise WebIDLError("[Replaceable] used on an attribute on "
                                  "interface %s which is a callback interface" %
                                  self.identifier.name,
                                  [self.location, member.location])
            # Check that PutForwards refers to another attribute and that no
            # cycles exist in forwarded assignments.  Also check for a
            # integer-typed "length" attribute.
            if member.isAttr():
                if (member.identifier.name == "length" and
                    member.type.isInteger()):
                    hasLengthAttribute = True
                iface = self
                attr = member
                putForwards = attr.getExtendedAttribute("PutForwards")
                if putForwards and self.isCallback():
                    raise WebIDLError("[PutForwards] used on an attribute "
                                      "on interface %s which is a callback "
                                      "interface" % self.identifier.name,
                                      [self.location, member.location])
                # Walk the PutForwards chain; a cycle back to the original
                # member or a dangling target is an error.
                while putForwards is not None:
                    forwardIface = attr.type.unroll().inner
                    # NOTE(review): "fowardAttr" is a long-standing local
                    # typo for "forwardAttr"; purely local, left as-is.
                    fowardAttr = None
                    for forwardedMember in forwardIface.members:
                        if (not forwardedMember.isAttr() or
                            forwardedMember.identifier.name != putForwards[0]):
                            continue
                        if forwardedMember == member:
                            raise WebIDLError("Cycle detected in forwarded "
                                              "assignments for attribute %s on "
                                              "%s" %
                                              (member.identifier.name, self),
                                              [member.location])
                        fowardAttr = forwardedMember
                        break
                    if fowardAttr is None:
                        raise WebIDLError("Attribute %s on %s forwards to "
                                          "missing attribute %s" %
                                          (attr.identifier.name, iface, putForwards),
                                          [attr.location])
                    iface = forwardIface
                    attr = fowardAttr
                    putForwards = attr.getExtendedAttribute("PutForwards")
            # Check that the name of an [Alias] doesn't conflict with an
            # interface member and whether we support indexed properties.
            if member.isMethod():
                if member.isGetter() and member.isIndexed():
                    indexedGetter = member
                for alias in member.aliases:
                    if self.isOnGlobalProtoChain():
                        raise WebIDLError("[Alias] must not be used on a "
                                          "[Global] interface operation",
                                          [member.location])
                    if (member.getExtendedAttribute("Exposed") or
                        member.getExtendedAttribute("ChromeOnly") or
                        member.getExtendedAttribute("Pref") or
                        member.getExtendedAttribute("Func") or
                        member.getExtendedAttribute("SecureContext")):
                        raise WebIDLError("[Alias] must not be used on a "
                                          "conditionally exposed operation",
                                          [member.location])
                    if member.isStatic():
                        raise WebIDLError("[Alias] must not be used on a "
                                          "static operation",
                                          [member.location])
                    if member.isIdentifierLess():
                        raise WebIDLError("[Alias] must not be used on an "
                                          "identifierless operation",
                                          [member.location])
                    if member.isUnforgeable():
                        raise WebIDLError("[Alias] must not be used on an "
                                          "[Unforgeable] operation",
                                          [member.location])
                    for m in self.members:
                        if m.identifier.name == alias:
                            raise WebIDLError("[Alias=%s] has same name as "
                                              "interface member" % alias,
                                              [member.location, m.location])
                        if m.isMethod() and m != member and alias in m.aliases:
                            raise WebIDLError("duplicate [Alias=%s] definitions" %
                                              alias,
                                              [member.location, m.location])
        # Conditional exposure makes no sense for interfaces with no
        # interface object, unless they're navigator properties.
        # And SecureContext makes sense for interfaces with no interface object,
        # since it is also propagated to interface members.
        if (self.isExposedConditionally(exclusions=["SecureContext"]) and
            not self.hasInterfaceObject() and
            not self.isNavigatorProperty()):
            raise WebIDLError("Interface with no interface object is "
                              "exposed conditionally",
                              [self.location])
        # Value iterators are only allowed on interfaces with indexed getters,
        # and pair iterators are only allowed on interfaces without indexed
        # getters.
        if self.isIterable():
            iterableDecl = self.maplikeOrSetlikeOrIterable
            if iterableDecl.isValueIterator():
                if not indexedGetter:
                    raise WebIDLError("Interface with value iterator does not "
                                      "support indexed properties",
                                      [self.location, iterableDecl.location])
                if iterableDecl.valueType != indexedGetter.signatures()[0][0]:
                    raise WebIDLError("Iterable type does not match indexed "
                                      "getter type",
                                      [iterableDecl.location,
                                       indexedGetter.location])
                if not hasLengthAttribute:
                    raise WebIDLError('Interface with value iterator does not '
                                      'have an integer-typed "length" attribute',
                                      [self.location, iterableDecl.location])
            else:
                assert iterableDecl.isPairIterator()
                if indexedGetter:
                    raise WebIDLError("Interface with pair iterator supports "
                                      "indexed properties",
                                      [self.location, iterableDecl.location,
                                       indexedGetter.location])
        if indexedGetter and not hasLengthAttribute:
            raise WebIDLError('Interface with an indexed getter does not have '
                              'an integer-typed "length" attribute',
                              [self.location, indexedGetter.location])
    def isExternal(self):
        """Always False for this class (in-tree interface/namespace definitions)."""
        return False
    def setIsConsequentialInterfaceOf(self, other):
        """Mark this interface as consequential and record `other` as based on it."""
        self._consequential = True
        self.interfacesBasedOnSelf.add(other)
    def isConsequential(self):
        """True once setIsConsequentialInterfaceOf() has marked this interface."""
        return self._consequential
    def setCallback(self, value):
        """Record whether this is a callback interface."""
        self._callback = value
    def isCallback(self):
        """True if setCallback() marked this as a callback interface."""
        return self._callback
def isSingleOperationInterface(self):
assert self.isCallback() or self.isJSImplemented()
return (
# JS-implemented things should never need the
# this-handling weirdness of single-operation interfaces.
not self.isJSImplemented() and
# Not inheriting from another interface
not self.parent and
# No consequential interfaces
len(self.getConsequentialInterfaces()) == 0 and
# No attributes of any kinds
not any(m.isAttr() for m in self.members) and
# There is at least one regular operation, and all regular
# operations have the same identifier
len(set(m.identifier.name for m in self.members if
m.isMethod() and not m.isStatic())) == 1)
def inheritanceDepth(self):
depth = 0
parent = self.parent
while parent:
depth = depth + 1
parent = parent.parent
return depth
def hasConstants(self):
return any(m.isConst() for m in self.members)
def hasInterfaceObject(self):
if self.isCallback():
return self.hasConstants()
return not hasattr(self, "_noInterfaceObject")
def hasInterfacePrototypeObject(self):
return (not self.isCallback() and not self.isNamespace()
and self.getUserData('hasConcreteDescendant', False))
    def addImplementedInterface(self, implementedInterface):
        """Record that this interface implements `implementedInterface`."""
        assert(isinstance(implementedInterface, IDLInterface))
        self.implementedInterfaces.add(implementedInterface)
def getInheritedInterfaces(self):
"""
Returns a list of the interfaces this interface inherits from
(not including this interface itself). The list is in order
from most derived to least derived.
"""
assert(self._finished)
if not self.parent:
return []
parentInterfaces = self.parent.getInheritedInterfaces()
parentInterfaces.insert(0, self.parent)
return parentInterfaces
def getConsequentialInterfaces(self):
assert(self._finished)
# The interfaces we implement directly
consequentialInterfaces = set(self.implementedInterfaces)
# And their inherited interfaces
for iface in self.implementedInterfaces:
consequentialInterfaces |= set(iface.getInheritedInterfaces())
# And now collect up the consequential interfaces of all of those
temp = set()
for iface in consequentialInterfaces:
temp |= iface.getConsequentialInterfaces()
return consequentialInterfaces | temp
    def findInterfaceLoopPoint(self, otherInterface):
        """
        Finds an interface, amongst our ancestors and consequential interfaces,
        that inherits from otherInterface or implements otherInterface
        directly.  If there is no such interface, returns None.
        """
        # Walk the inheritance chain first.
        if self.parent:
            if self.parent == otherInterface:
                return self
            loopPoint = self.parent.findInterfaceLoopPoint(otherInterface)
            if loopPoint:
                return loopPoint
        # Then check direct 'implements' relationships ...
        if otherInterface in self.implementedInterfaces:
            return self
        # ... and recurse into the implemented interfaces themselves.
        for iface in self.implementedInterfaces:
            loopPoint = iface.findInterfaceLoopPoint(otherInterface)
            if loopPoint:
                return loopPoint
        return None
def getExtendedAttribute(self, name):
return self._extendedAttrDict.get(name, None)
    def setNonPartial(self, location, parent, members):
        """
        Install the non-partial definition of this interface/namespace.
        Raises WebIDLError if a non-partial definition was already seen.
        """
        assert not parent or isinstance(parent, IDLIdentifierPlaceholder)
        if self._isKnownNonPartial:
            raise WebIDLError("Two non-partial definitions for the "
                              "same %s" %
                              ("interface" if self.isInterface()
                               else "namespace"),
                              [location, self.location])
        self._isKnownNonPartial = True
        # Now make it look like we were parsed at this new location, since
        # that's the place where the interface is "really" defined
        self.location = location
        assert not self.parent
        self.parent = parent
        # Put the new members at the beginning
        self.members = members + self.members
    def addPartialInterface(self, partial):
        """Record a partial interface definition with the same name as ours."""
        assert self.identifier.name == partial.identifier.name
        self._partialInterfaces.append(partial)
def getPartialInterfaces(self):
# Don't let people mutate our guts.
return list(self._partialInterfaces)
def getJSImplementation(self):
classId = self.getExtendedAttribute("JSImplementation")
if not classId:
return classId
assert isinstance(classId, list)
assert len(classId) == 1
return classId[0]
    def isJSImplemented(self):
        """True if this interface carries a [JSImplementation] extended attribute."""
        return bool(self.getJSImplementation())
def hasProbablyShortLivingWrapper(self):
current = self
while current:
if current.getExtendedAttribute("ProbablyShortLivingWrapper"):
return True
current = current.parent
return False
def isNavigatorProperty(self):
naviProp = self.getExtendedAttribute("NavigatorProperty")
if not naviProp:
return False
assert len(naviProp) == 1
assert isinstance(naviProp, list)
assert len(naviProp[0]) != 0
return True
    def getNavigatorProperty(self):
        """
        Synthesize and return the IDLAttribute representing this
        interface's [NavigatorProperty] getter, or None when the
        attribute is absent.
        """
        naviProp = self.getExtendedAttribute("NavigatorProperty")
        if not naviProp:
            return None
        assert len(naviProp) == 1
        assert isinstance(naviProp, list)
        assert len(naviProp[0]) != 0
        # Propagate only the exposure-conditioning attributes that are
        # actually present on this interface (Python 2 dict view).
        conditionExtendedAttributes = self._extendedAttrDict.viewkeys() & IDLInterfaceOrNamespace.conditionExtendedAttributes
        attr = IDLAttribute(self.location,
                            IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"), naviProp[0]),
                            IDLUnresolvedType(self.location, IDLUnresolvedIdentifier(self.location, self.identifier.name)),
                            True,
                            extendedAttrDict={ a: self._extendedAttrDict[a] for a in conditionExtendedAttributes },
                            navigatorObjectGetter=True)
        attr._exposureGlobalNames = self._exposureGlobalNames
        # We're abusing Constant a little bit here, because we need Cached. The
        # getter will create a new object every time, but we're never going to
        # clear the cached value.
        extendedAttrs = [ IDLExtendedAttribute(self.location, ("Throws", )),
                          IDLExtendedAttribute(self.location, ("Cached", )),
                          IDLExtendedAttribute(self.location, ("Constant", )) ]
        attr.addExtendedAttributes(extendedAttrs)
        return attr
    def hasChildInterfaces(self):
        """True if some other interface inherits from this one (flag set elsewhere)."""
        return self._hasChildInterfaces
    def isOnGlobalProtoChain(self):
        """True if this interface is on the proto chain of a [Global] object."""
        return self._isOnGlobalProtoChain
def _getDependentObjects(self):
deps = set(self.members)
deps.update(self.implementedInterfaces)
if self.parent:
deps.add(self.parent)
return deps
def hasMembersInSlots(self):
return self._ownMembersInSlots != 0
conditionExtendedAttributes = [ "Pref", "ChromeOnly", "Func",
"SecureContext" ]
def isExposedConditionally(self, exclusions=[]):
return any(((not a in exclusions) and self.getExtendedAttribute(a)) for a in self.conditionExtendedAttributes)
class IDLInterface(IDLInterfaceOrNamespace):
    """A WebIDL interface definition (possibly assembled from partials)."""
    def __init__(self, location, parentScope, name, parent, members,
                 isKnownNonPartial):
        IDLInterfaceOrNamespace.__init__(self, location, parentScope, name,
                                         parent, members, isKnownNonPartial)
    def __str__(self):
        return "Interface '%s'" % self.identifier.name
    def isInterface(self):
        return True
    def addExtendedAttributes(self, attrs):
        """
        Validate each extended attribute and record it in
        self._extendedAttrDict.  The constructor-related attributes also
        synthesize static IDLMethod objects; [Global]/[PrimaryGlobal]
        update the parent scope's global-name bookkeeping.
        """
        for attr in attrs:
            identifier = attr.identifier()
            # Special cased attrs
            # NOTE: the first two checks both raise, so the fact that the
            # second one restarts the if/elif chain is harmless.
            if identifier == "TreatNonCallableAsNull":
                raise WebIDLError("TreatNonCallableAsNull cannot be specified on interfaces",
                                  [attr.location, self.location])
            if identifier == "TreatNonObjectAsNull":
                raise WebIDLError("TreatNonObjectAsNull cannot be specified on interfaces",
                                  [attr.location, self.location])
            elif identifier == "NoInterfaceObject":
                if not attr.noArguments():
                    raise WebIDLError("[NoInterfaceObject] must take no arguments",
                                      [attr.location])
                if self.ctor():
                    raise WebIDLError("Constructor and NoInterfaceObject are incompatible",
                                      [self.location])
                self._noInterfaceObject = True
            elif identifier == "Constructor" or identifier == "NamedConstructor" or identifier == "ChromeConstructor" or identifier == "HTMLConstructor":
                if identifier == "Constructor" and not self.hasInterfaceObject():
                    raise WebIDLError(str(identifier) + " and NoInterfaceObject are incompatible",
                                      [self.location])
                if identifier == "NamedConstructor" and not attr.hasValue():
                    raise WebIDLError("NamedConstructor must either take an identifier or take a named argument list",
                                      [attr.location])
                if identifier == "ChromeConstructor" and not self.hasInterfaceObject():
                    raise WebIDLError(str(identifier) + " and NoInterfaceObject are incompatible",
                                      [self.location])
                if identifier == "HTMLConstructor":
                    if not self.hasInterfaceObject():
                        raise WebIDLError(str(identifier) + " and NoInterfaceObject are incompatible",
                                          [self.location])
                    if not attr.noArguments():
                        raise WebIDLError(str(identifier) + " must take no arguments",
                                          [attr.location])
                # Synthesize a static method representing the constructor.
                args = attr.args() if attr.hasArgs() else []
                retType = IDLWrapperType(self.location, self)
                if identifier == "Constructor" or identifier == "ChromeConstructor" or identifier == "HTMLConstructor":
                    name = "constructor"
                    allowForbidden = True
                else:
                    name = attr.value()
                    allowForbidden = False
                methodIdentifier = IDLUnresolvedIdentifier(self.location, name,
                                                           allowForbidden=allowForbidden)
                method = IDLMethod(self.location, methodIdentifier, retType,
                                   args, static=True,
                                   htmlConstructor=(identifier == "HTMLConstructor"))
                # Constructors are always NewObject and are always
                # assumed to be able to throw (since there's no way to
                # indicate otherwise) and never have any other
                # extended attributes.
                method.addExtendedAttributes(
                    [IDLExtendedAttribute(self.location, ("NewObject",)),
                     IDLExtendedAttribute(self.location, ("Throws",))])
                if identifier == "ChromeConstructor":
                    method.addExtendedAttributes(
                        [IDLExtendedAttribute(self.location, ("ChromeOnly",))])
                if identifier == "Constructor" or identifier == "ChromeConstructor" or identifier == "HTMLConstructor":
                    method.resolve(self)
                else:
                    # We need to detect conflicts for NamedConstructors across
                    # interfaces. We first call resolve on the parentScope,
                    # which will merge all NamedConstructors with the same
                    # identifier accross interfaces as overloads.
                    method.resolve(self.parentScope)
                    # Then we look up the identifier on the parentScope. If the
                    # result is the same as the method we're adding then it
                    # hasn't been added as an overload and it's the first time
                    # we've encountered a NamedConstructor with that identifier.
                    # If the result is not the same as the method we're adding
                    # then it has been added as an overload and we need to check
                    # whether the result is actually one of our existing
                    # NamedConstructors.
                    newMethod = self.parentScope.lookupIdentifier(method.identifier)
                    if newMethod == method:
                        self.namedConstructors.append(method)
                    elif newMethod not in self.namedConstructors:
                        raise WebIDLError("NamedConstructor conflicts with a NamedConstructor of a different interface",
                                          [method.location, newMethod.location])
            elif (identifier == "ArrayClass"):
                if not attr.noArguments():
                    raise WebIDLError("[ArrayClass] must take no arguments",
                                      [attr.location])
                if self.parent:
                    raise WebIDLError("[ArrayClass] must not be specified on "
                                      "an interface with inherited interfaces",
                                      [attr.location, self.location])
            elif (identifier == "ExceptionClass"):
                if not attr.noArguments():
                    raise WebIDLError("[ExceptionClass] must take no arguments",
                                      [attr.location])
                if self.parent:
                    raise WebIDLError("[ExceptionClass] must not be specified on "
                                      "an interface with inherited interfaces",
                                      [attr.location, self.location])
            elif identifier == "Global":
                # [Global] may carry explicit name(s); default to the
                # interface name.
                if attr.hasValue():
                    self.globalNames = [attr.value()]
                elif attr.hasArgs():
                    self.globalNames = attr.args()
                else:
                    self.globalNames = [self.identifier.name]
                self.parentScope.globalNames.update(self.globalNames)
                for globalName in self.globalNames:
                    self.parentScope.globalNameMapping[globalName].add(self.identifier.name)
                self._isOnGlobalProtoChain = True
            elif identifier == "PrimaryGlobal":
                if not attr.noArguments():
                    raise WebIDLError("[PrimaryGlobal] must take no arguments",
                                      [attr.location])
                if self.parentScope.primaryGlobalAttr is not None:
                    raise WebIDLError(
                        "[PrimaryGlobal] specified twice",
                        [attr.location,
                         self.parentScope.primaryGlobalAttr.location])
                self.parentScope.primaryGlobalAttr = attr
                self.parentScope.primaryGlobalName = self.identifier.name
                self.parentScope.globalNames.add(self.identifier.name)
                self.parentScope.globalNameMapping[self.identifier.name].add(self.identifier.name)
                self._isOnGlobalProtoChain = True
            elif identifier == "SecureContext":
                if not attr.noArguments():
                    raise WebIDLError("[%s] must take no arguments" % identifier,
                                      [attr.location])
                # This gets propagated to all our members.
                for member in self.members:
                    if member.getExtendedAttribute("SecureContext"):
                        raise WebIDLError("[SecureContext] specified on both "
                                          "an interface member and on the "
                                          "interface itself",
                                          [member.location, attr.location])
                    member.addExtendedAttributes([attr])
            elif (identifier == "NeedResolve" or
                  identifier == "OverrideBuiltins" or
                  identifier == "ChromeOnly" or
                  identifier == "Unforgeable" or
                  identifier == "UnsafeInPrerendering" or
                  identifier == "LegacyEventInit" or
                  identifier == "ProbablyShortLivingWrapper" or
                  identifier == "LegacyUnenumerableNamedProperties" or
                  identifier == "NonOrdinaryGetPrototypeOf" or
                  identifier == "Abstract" or
                  identifier == "Inline"):
                # Known extended attributes that do not take values
                if not attr.noArguments():
                    raise WebIDLError("[%s] must take no arguments" % identifier,
                                      [attr.location])
            elif identifier == "Exposed":
                convertExposedAttrToGlobalNameSet(attr,
                                                  self._exposureGlobalNames)
            elif (identifier == "Pref" or
                  identifier == "JSImplementation" or
                  identifier == "HeaderFile" or
                  identifier == "NavigatorProperty" or
                  identifier == "Func" or
                  identifier == "Deprecated"):
                # Known extended attributes that take a string value
                if not attr.hasValue():
                    raise WebIDLError("[%s] must have a value" % identifier,
                                      [attr.location])
            else:
                raise WebIDLError("Unknown extended attribute %s on interface" % identifier,
                                  [attr.location])
            # Record the attribute; valueless attributes are stored as True.
            attrlist = attr.listValue()
            self._extendedAttrDict[identifier] = attrlist if len(attrlist) else True
class IDLNamespace(IDLInterfaceOrNamespace):
    """A WebIDL namespace: an interface-like container with no parent."""
    def __init__(self, location, parentScope, name, members, isKnownNonPartial):
        # Namespaces never inherit, hence the None parent.
        IDLInterfaceOrNamespace.__init__(self, location, parentScope, name,
                                         None, members, isKnownNonPartial)
    def __str__(self):
        return "Namespace '%s'" % self.identifier.name
    def isNamespace(self):
        return True
    def addExtendedAttributes(self, attrs):
        # The set of things namespaces support is small enough it's simpler
        # to factor out into a separate method than it is to sprinkle
        # isNamespace() checks all through
        # IDLInterfaceOrNamespace.addExtendedAttributes.
        for attr in attrs:
            identifier = attr.identifier()
            if identifier == "Exposed":
                convertExposedAttrToGlobalNameSet(attr,
                                                  self._exposureGlobalNames)
            elif identifier == "ClassString":
                # Takes a string value to override the default "Object" if
                # desired.
                if not attr.hasValue():
                    raise WebIDLError("[%s] must have a value" % identifier,
                                      [attr.location])
            elif identifier == "ProtoObjectHack":
                if not attr.noArguments():
                    raise WebIDLError("[%s] must not have arguments" % identifier,
                                      [attr.location])
            else:
                raise WebIDLError("Unknown extended attribute %s on namespace" %
                                  identifier,
                                  [attr.location])
            # Record the attribute; valueless attributes are stored as True.
            attrlist = attr.listValue()
            self._extendedAttrDict[identifier] = attrlist if len(attrlist) else True
class IDLDictionary(IDLObjectWithScope):
    """A WebIDL dictionary definition with optional single inheritance."""
    def __init__(self, location, parentScope, name, parent, members):
        assert isinstance(parentScope, IDLScope)
        assert isinstance(name, IDLUnresolvedIdentifier)
        assert not parent or isinstance(parent, IDLIdentifierPlaceholder)
        self.parent = parent
        self._finished = False
        self.members = list(members)
        IDLObjectWithScope.__init__(self, location, parentScope, name)
    def __str__(self):
        return "Dictionary '%s'" % self.identifier.name
    def isDictionary(self):
        return True
    def canBeEmpty(self):
        """
        Returns true if this dictionary can be empty (that is, it has no
        required members and neither do any of its ancestors).
        """
        return (all(member.optional for member in self.members) and
                (not self.parent or self.parent.canBeEmpty()))
    def finish(self, scope):
        """Resolve the parent placeholder and all member types; idempotent."""
        if self._finished:
            return
        self._finished = True
        if self.parent:
            assert isinstance(self.parent, IDLIdentifierPlaceholder)
            oldParent = self.parent
            self.parent = self.parent.finish(scope)
            if not isinstance(self.parent, IDLDictionary):
                raise WebIDLError("Dictionary %s has parent that is not a dictionary" %
                                  self.identifier.name,
                                  [oldParent.location, self.parent.location])
            # Make sure the parent resolves all its members before we start
            # looking at them.
            self.parent.finish(scope)
        for member in self.members:
            member.resolve(self)
            if not member.isComplete():
                member.complete(scope)
                assert member.type.isComplete()
        # Members of a dictionary are sorted in lexicographic order
        # (Python 2 sort; cmp= is gone in Python 3).
        self.members.sort(cmp=cmp, key=lambda x: x.identifier.name)
        inheritedMembers = []
        ancestor = self.parent
        while ancestor:
            if ancestor == self:
                raise WebIDLError("Dictionary %s has itself as an ancestor" %
                                  self.identifier.name,
                                  [self.identifier.location])
            inheritedMembers.extend(ancestor.members)
            ancestor = ancestor.parent
        # Catch name duplication
        for inheritedMember in inheritedMembers:
            for member in self.members:
                if member.identifier.name == inheritedMember.identifier.name:
                    raise WebIDLError("Dictionary %s has two members with name %s" %
                                      (self.identifier.name, member.identifier.name),
                                      [member.location, inheritedMember.location])
    def validate(self):
        """Reject dictionaries that (transitively) contain themselves or have nullable-dictionary members."""
        def typeContainsDictionary(memberType, dictionary):
            """
            Returns a tuple whose:
                - First element is a Boolean value indicating whether
                  memberType contains dictionary.
                - Second element is:
                    A list of locations that leads from the type that was passed in
                    the memberType argument, to the dictionary being validated,
                    if the boolean value in the first element is True.
                    None, if the boolean value in the first element is False.
            """
            if (memberType.nullable() or
                memberType.isSequence() or
                memberType.isRecord()):
                return typeContainsDictionary(memberType.inner, dictionary)
            if memberType.isDictionary():
                if memberType.inner == dictionary:
                    return (True, [memberType.location])
                (contains, locations) = dictionaryContainsDictionary(memberType.inner,
                                                                     dictionary)
                if contains:
                    return (True, [memberType.location] + locations)
            if memberType.isUnion():
                for member in memberType.flatMemberTypes:
                    (contains, locations) = typeContainsDictionary(member, dictionary)
                    if contains:
                        return (True, locations)
            return (False, None)
        def dictionaryContainsDictionary(dictMember, dictionary):
            # True (with a location trail) if `dictionary` appears in
            # dictMember's members or ancestry.
            for member in dictMember.members:
                (contains, locations) = typeContainsDictionary(member.type, dictionary)
                if contains:
                    return (True, [member.location] + locations)
            if dictMember.parent:
                if dictMember.parent == dictionary:
                    return (True, [dictMember.location])
                else:
                    (contains, locations) = dictionaryContainsDictionary(dictMember.parent, dictionary)
                    if contains:
                        return (True, [dictMember.location] + locations)
            return (False, None)
        for member in self.members:
            if member.type.isDictionary() and member.type.nullable():
                raise WebIDLError("Dictionary %s has member with nullable "
                                  "dictionary type" % self.identifier.name,
                                  [member.location])
            (contains, locations) = typeContainsDictionary(member.type, self)
            if contains:
                raise WebIDLError("Dictionary %s has member with itself as type." %
                                  self.identifier.name,
                                  [member.location] + locations)
    def addExtendedAttributes(self, attrs):
        if len(attrs) != 0:
            raise WebIDLError("There are no extended attributes that are "
                              "allowed on dictionaries",
                              [attrs[0].location, self.location])
    def _getDependentObjects(self):
        deps = set(self.members)
        if (self.parent):
            deps.add(self.parent)
        return deps
class IDLEnum(IDLObjectWithIdentifier):
    """A WebIDL enumeration: a named list of distinct string values."""
    def __init__(self, location, parentScope, name, values):
        assert isinstance(parentScope, IDLScope)
        assert isinstance(name, IDLUnresolvedIdentifier)
        # Reject duplicate enum strings up front.
        if len(set(values)) != len(values):
            raise WebIDLError("Enum %s has multiple identical strings" % name.name,
                              [location])
        IDLObjectWithIdentifier.__init__(self, location, parentScope, name)
        self._values = values
    def values(self):
        """Return the enum's string values."""
        return self._values
    def finish(self, scope):
        # Enums have nothing to resolve.
        pass
    def validate(self):
        pass
    def isEnum(self):
        return True
    def addExtendedAttributes(self, attrs):
        # Enums accept no extended attributes at all.
        if attrs:
            raise WebIDLError("There are no extended attributes that are "
                              "allowed on enums",
                              [attrs[0].location, self.location])
    def _getDependentObjects(self):
        return set()
class IDLType(IDLObject):
    """
    Base class for all IDL types.  The is*() predicates default to
    False here; concrete subclasses override the ones that apply to
    them, along with tag().
    """
    Tags = enum(
        # The integer types
        'int8',
        'uint8',
        'int16',
        'uint16',
        'int32',
        'uint32',
        'int64',
        'uint64',
        # Additional primitive types
        'bool',
        'unrestricted_float',
        'float',
        'unrestricted_double',
        # "double" last primitive type to match IDLBuiltinType
        'double',
        # Other types
        'any',
        'domstring',
        'bytestring',
        'usvstring',
        'object',
        'date',
        'void',
        # Funny stuff
        'interface',
        'dictionary',
        'enum',
        'callback',
        'union',
        'sequence',
        'record',
        'promise',
    )
    def __init__(self, location, name):
        IDLObject.__init__(self, location)
        self.name = name
        # Subclasses for built-in types flip this to True.
        self.builtin = False
    def __eq__(self, other):
        # Types compare by builtin-ness and name.
        return other and self.builtin == other.builtin and self.name == other.name
    def __ne__(self, other):
        return not self == other
    def __str__(self):
        return str(self.name)
    def isType(self):
        return True
    def nullable(self):
        return False
    def isPrimitive(self):
        return False
    def isBoolean(self):
        return False
    def isNumeric(self):
        return False
    def isString(self):
        return False
    def isByteString(self):
        return False
    def isDOMString(self):
        return False
    def isUSVString(self):
        return False
    def isVoid(self):
        return self.name == "Void"
    def isSequence(self):
        return False
    def isRecord(self):
        return False
    def isArrayBuffer(self):
        return False
    def isArrayBufferView(self):
        return False
    def isSharedArrayBuffer(self):
        return False
    def isTypedArray(self):
        return False
    def isCallbackInterface(self):
        return False
    def isNonCallbackInterface(self):
        return False
    def isGeckoInterface(self):
        """ Returns a boolean indicating whether this type is an 'interface'
            type that is implemented in Gecko. At the moment, this returns
            true for all interface types that are not types from the TypedArray
            spec."""
        return self.isInterface() and not self.isSpiderMonkeyInterface()
    def isSpiderMonkeyInterface(self):
        """ Returns a boolean indicating whether this type is an 'interface'
            type that is implemented in Spidermonkey.  At the moment, this
            only returns true for the types from the TypedArray spec. """
        return self.isInterface() and (self.isArrayBuffer() or
                                       self.isArrayBufferView() or
                                       self.isSharedArrayBuffer() or
                                       self.isTypedArray())
    def isDictionary(self):
        return False
    def isInterface(self):
        return False
    def isAny(self):
        return self.tag() == IDLType.Tags.any
    def isDate(self):
        return self.tag() == IDLType.Tags.date
    def isObject(self):
        return self.tag() == IDLType.Tags.object
    def isPromise(self):
        return False
    def isComplete(self):
        return True
    def includesRestrictedFloat(self):
        return False
    def isFloat(self):
        return False
    def isUnrestricted(self):
        # Should only call this on float types
        # NOTE(review): no return statement here — base implementation
        # falls through to None; float subclasses are expected to
        # override this.
        assert self.isFloat()
    def isSerializable(self):
        return False
    def tag(self):
        # Subclasses must provide their IDLType.Tags value.
        assert False  # Override me!
    def treatNonCallableAsNull(self):
        assert self.tag() == IDLType.Tags.callback
        return self.nullable() and self.inner.callback._treatNonCallableAsNull
    def treatNonObjectAsNull(self):
        assert self.tag() == IDLType.Tags.callback
        return self.nullable() and self.inner.callback._treatNonObjectAsNull
    def addExtendedAttributes(self, attrs):
        if len(attrs) != 0:
            raise WebIDLError("There are no extended attributes that are "
                              "allowed on types, for now (but this is "
                              "changing; see bug 1359269)",
                              [attrs[0].location, self.location])
    def resolveType(self, parentScope):
        pass
    def unroll(self):
        return self
    def isDistinguishableFrom(self, other):
        raise TypeError("Can't tell whether a generic type is or is not "
                        "distinguishable from other things")
    def isExposedInAllOf(self, exposureSet):
        return True
class IDLUnresolvedType(IDLType):
    """
    Unresolved types are interface types
    """
    def __init__(self, location, name):
        IDLType.__init__(self, location, name)
    def isComplete(self):
        return False
    def complete(self, scope):
        """Look up our name in `scope` and return the resolved concrete type."""
        obj = None
        try:
            obj = scope._lookupIdentifier(self.name)
        # NOTE(review): bare except — swallows *any* lookup failure
        # (including KeyboardInterrupt on Python 2) and reports it as an
        # unresolved type; consider narrowing.
        except:
            raise WebIDLError("Unresolved type '%s'." % self.name,
                              [self.location])
        assert obj
        if obj.isType():
            # Debug aid (Python 2 print statement) emitted just before
            # the assert below fires.
            print obj
        assert not obj.isType()
        if obj.isTypedef():
            assert self.name.name == obj.identifier.name
            typedefType = IDLTypedefType(self.location, obj.innerType,
                                         obj.identifier)
            assert not typedefType.isComplete()
            return typedefType.complete(scope)
        elif obj.isCallback() and not obj.isInterface():
            assert self.name.name == obj.identifier.name
            return IDLCallbackType(obj.location, obj)
        # resolve() is presumably called for its side effects on the
        # scope; its return value is unused — TODO confirm.
        name = self.name.resolve(scope, None)
        return IDLWrapperType(self.location, obj)
    def isDistinguishableFrom(self, other):
        raise TypeError("Can't tell whether an unresolved type is or is not "
                        "distinguishable from other things")
class IDLParametrizedType(IDLType):
    """Base for types parametrized over an inner type (nullable, sequence, record)."""
    def __init__(self, location, name, innerType):
        IDLType.__init__(self, location, name)
        self.builtin = False
        self.inner = innerType
    def includesRestrictedFloat(self):
        return self.inner.includesRestrictedFloat()
    def resolveType(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        self.inner.resolveType(parentScope)
    def isComplete(self):
        return self.inner.isComplete()
    def unroll(self):
        # Strip the parametrization and keep unrolling the inner type.
        return self.inner.unroll()
    def _getDependentObjects(self):
        return self.inner._getDependentObjects()
class IDLNullableType(IDLParametrizedType):
    """
    A nullable type `T?`.  Almost every predicate delegates to the inner
    type; nullable() is the distinguishing override.
    """
    def __init__(self, location, innerType):
        assert not innerType.isVoid()
        assert not innerType == BuiltinTypes[IDLBuiltinType.Types.any]
        name = innerType.name
        # If the inner type is not yet complete, complete() will compute
        # the "...OrNull" name later instead.
        if innerType.isComplete():
            name += "OrNull"
        IDLParametrizedType.__init__(self, location, name, innerType)
    def __eq__(self, other):
        return isinstance(other, IDLNullableType) and self.inner == other.inner
    def __str__(self):
        return self.inner.__str__() + "OrNull"
    def nullable(self):
        return True
    def isCallback(self):
        return self.inner.isCallback()
    def isPrimitive(self):
        return self.inner.isPrimitive()
    def isBoolean(self):
        return self.inner.isBoolean()
    def isNumeric(self):
        return self.inner.isNumeric()
    def isString(self):
        return self.inner.isString()
    def isByteString(self):
        return self.inner.isByteString()
    def isDOMString(self):
        return self.inner.isDOMString()
    def isUSVString(self):
        return self.inner.isUSVString()
    def isFloat(self):
        return self.inner.isFloat()
    def isUnrestricted(self):
        return self.inner.isUnrestricted()
    def isInteger(self):
        return self.inner.isInteger()
    def isVoid(self):
        return False
    def isSequence(self):
        return self.inner.isSequence()
    def isRecord(self):
        return self.inner.isRecord()
    def isArrayBuffer(self):
        return self.inner.isArrayBuffer()
    def isArrayBufferView(self):
        return self.inner.isArrayBufferView()
    def isSharedArrayBuffer(self):
        return self.inner.isSharedArrayBuffer()
    def isTypedArray(self):
        return self.inner.isTypedArray()
    def isDictionary(self):
        return self.inner.isDictionary()
    def isInterface(self):
        return self.inner.isInterface()
    def isPromise(self):
        # There is no such thing as a nullable Promise.
        assert not self.inner.isPromise()
        return False
    def isCallbackInterface(self):
        return self.inner.isCallbackInterface()
    def isNonCallbackInterface(self):
        return self.inner.isNonCallbackInterface()
    def isEnum(self):
        return self.inner.isEnum()
    def isUnion(self):
        return self.inner.isUnion()
    def isSerializable(self):
        return self.inner.isSerializable()
    def tag(self):
        return self.inner.tag()
    def complete(self, scope):
        """Complete the inner type, reject nested nullability, and fix our name."""
        self.inner = self.inner.complete(scope)
        if self.inner.nullable():
            raise WebIDLError("The inner type of a nullable type must not be "
                              "a nullable type",
                              [self.location, self.inner.location])
        if self.inner.isUnion():
            if self.inner.hasNullableType:
                raise WebIDLError("The inner type of a nullable type must not "
                                  "be a union type that itself has a nullable "
                                  "type as a member type", [self.location])
        self.name = self.inner.name + "OrNull"
        return self
    def isDistinguishableFrom(self, other):
        if (other.nullable() or
            other.isDictionary() or
            (other.isUnion() and
             (other.hasNullableType or other.hasDictionaryType()))):
            # Can't tell which type null should become
            return False
        return self.inner.isDistinguishableFrom(other)
class IDLSequenceType(IDLParametrizedType):
    """A WebIDL sequence<T> type."""
    def __init__(self, location, parameterType):
        assert not parameterType.isVoid()
        IDLParametrizedType.__init__(self, location, parameterType.name, parameterType)
        # Need to set self.name up front if our inner type is already complete,
        # since in that case our .complete() won't be called.
        if self.inner.isComplete():
            self.name = self.inner.name + "Sequence"
    def __eq__(self, other):
        return isinstance(other, IDLSequenceType) and self.inner == other.inner
    def __str__(self):
        return self.inner.__str__() + "Sequence"
    def nullable(self):
        return False
    def isPrimitive(self):
        return False
    def isString(self):
        return False
    def isByteString(self):
        return False
    def isDOMString(self):
        return False
    def isUSVString(self):
        return False
    def isVoid(self):
        return False
    def isSequence(self):
        return True
    def isDictionary(self):
        return False
    def isInterface(self):
        return False
    def isEnum(self):
        return False
    def isSerializable(self):
        # A sequence is serializable iff its element type is.
        return self.inner.isSerializable()
    def tag(self):
        return IDLType.Tags.sequence
    def complete(self, scope):
        self.inner = self.inner.complete(scope)
        self.name = self.inner.name + "Sequence"
        return self
    def isDistinguishableFrom(self, other):
        if other.isPromise():
            return False
        if other.isUnion():
            # Just forward to the union; it'll deal
            return other.isDistinguishableFrom(self)
        return (other.isPrimitive() or other.isString() or other.isEnum() or
                other.isDate() or other.isInterface() or
                other.isDictionary() or
                other.isCallback() or other.isRecord())
class IDLRecordType(IDLParametrizedType):
    """A WebIDL record<K, V> type; the inner type is the value type."""
    def __init__(self, location, keyType, valueType):
        assert keyType.isString()
        assert keyType.isComplete()
        assert not valueType.isVoid()
        IDLParametrizedType.__init__(self, location, valueType.name, valueType)
        self.keyType = keyType
        # Need to set self.name up front if our inner type is already complete,
        # since in that case our .complete() won't be called.
        if self.inner.isComplete():
            self.name = self.keyType.name + self.inner.name + "Record"
    def __eq__(self, other):
        # NOTE(review): keyType is NOT compared here, so records that
        # differ only in key string type compare equal — confirm this is
        # intentional before relying on record equality.
        return isinstance(other, IDLRecordType) and self.inner == other.inner
    def __str__(self):
        return self.keyType.__str__() + self.inner.__str__() + "Record"
    def isRecord(self):
        return True
    def tag(self):
        return IDLType.Tags.record
    def complete(self, scope):
        self.inner = self.inner.complete(scope)
        self.name = self.keyType.name + self.inner.name + "Record"
        return self
    def unroll(self):
        # We do not unroll our inner. Just stop at ourselves. That
        # lets us add headers for both ourselves and our inner as
        # needed.
        return self
    def isDistinguishableFrom(self, other):
        if other.isPromise():
            return False
        if other.isUnion():
            # Just forward to the union; it'll deal
            return other.isDistinguishableFrom(self)
        return (other.isPrimitive() or other.isString() or other.isEnum() or
                other.isDate() or other.isNonCallbackInterface() or other.isSequence())
    def isExposedInAllOf(self, exposureSet):
        return self.inner.unroll().isExposedInAllOf(exposureSet)
class IDLUnionType(IDLType):
    def __init__(self, location, memberTypes):
        # A union has no name until complete() computes one from its members.
        IDLType.__init__(self, location, "")
        self.memberTypes = memberTypes
        # Set by complete(): whether some member type was nullable.
        self.hasNullableType = False
        # Set by complete(): the dictionary-typed member, if any.
        self._dictionaryType = None
        # Flattened member list; stays None until complete() runs
        # (isComplete() keys off this).
        self.flatMemberTypes = None
        self.builtin = False
    def __eq__(self, other):
        # Member order matters: same members in a different order compare unequal.
        return isinstance(other, IDLUnionType) and self.memberTypes == other.memberTypes
    def __hash__(self):
        # Only valid once complete() has computed self.name.
        assert self.isComplete()
        return self.name.__hash__()
    def isVoid(self):
        """Unions are never the void type."""
        return False
    def isUnion(self):
        return True
def isSerializable(self):
return all(m.isSerializable() for m in self.memberTypes)
    def includesRestrictedFloat(self):
        """True if any member type includes a restricted float."""
        return any(t.includesRestrictedFloat() for t in self.memberTypes)
    def tag(self):
        return IDLType.Tags.union
    def resolveType(self, parentScope):
        """Resolve every member type against `parentScope`."""
        assert isinstance(parentScope, IDLScope)
        for t in self.memberTypes:
            t.resolveType(parentScope)
    def isComplete(self):
        # complete() populates flatMemberTypes; None means not done yet.
        return self.flatMemberTypes is not None
def complete(self, scope):
def typeName(type):
if isinstance(type, IDLNullableType):
return typeName(type.inner) + "OrNull"
if isinstance(type, IDLWrapperType):
return typeName(type._identifier.object())
if isinstance(type, IDLObjectWithIdentifier):
return typeName(type.identifier)
return type.name
for (i, type) in enumerate(self.memberTypes):
# Exclude typedefs because if given "typedef (B or C) test",
# we want AOrTest, not AOrBOrC
if not type.isComplete() and not isinstance(type, IDLTypedefType):
self.memberTypes[i] = type.complete(scope)
self.name = "Or".join(typeName(type) for type in self.memberTypes)
# We do this again to complete the typedef types
for (i, type) in enumerate(self.memberTypes):
if not type.isComplete():
self.memberTypes[i] = type.complete(scope)
self.flatMemberTypes = list(self.memberTypes)
i = 0
while i < len(self.flatMemberTypes):
if self.flatMemberTypes[i].nullable():
if self.hasNullableType:
raise WebIDLError("Can't have more than one nullable types in a union",
[nullableType.location, self.flatMemberTypes[i].location])
if self.hasDictionaryType():
raise WebIDLError("Can't have a nullable type and a "
"dictionary type in a union",
[self._dictionaryType.location,
self.flatMemberTypes[i].location])
self.hasNullableType = True
nullableType = self.flatMemberTypes[i]
self.flatMemberTypes[i] = self.flatMemberTypes[i].inner
continue
if self.flatMemberTypes[i].isDictionary():
if self.hasNullableType:
raise WebIDLError("Can't have a nullable type and a "
"dictionary type in a union",
[nullableType.location,
self.flatMemberTypes[i].location])
self._dictionaryType = self.flatMemberTypes[i]
elif self.flatMemberTypes[i].isUnion():
self.flatMemberTypes[i:i + 1] = self.flatMemberTypes[i].memberTypes
continue
i += 1
for (i, t) in enumerate(self.flatMemberTypes[:-1]):
for u in self.flatMemberTypes[i + 1:]:
if not t.isDistinguishableFrom(u):
raise WebIDLError("Flat member types of a union should be "
"distinguishable, " + str(t) + " is not "
"distinguishable from " + str(u),
[self.location, t.location, u.location])
return self
def isDistinguishableFrom(self, other):
if self.hasNullableType and other.nullable():
# Can't tell which type null should become
return False
if other.isUnion():
otherTypes = other.unroll().memberTypes
else:
otherTypes = [other]
# For every type in otherTypes, check that it's distinguishable from
# every type in our types
for u in otherTypes:
if any(not t.isDistinguishableFrom(u) for t in self.memberTypes):
return False
return True
def isExposedInAllOf(self, exposureSet):
# We could have different member types in different globals. Just make sure that each thing in exposureSet has one of our member types exposed in it.
for globalName in exposureSet:
if not any(t.unroll().isExposedInAllOf(set([globalName])) for t
in self.flatMemberTypes):
return False
return True
def hasDictionaryType(self):
return self._dictionaryType is not None
def hasPossiblyEmptyDictionaryType(self):
return (self._dictionaryType is not None and
self._dictionaryType.inner.canBeEmpty())
def _getDependentObjects(self):
return set(self.memberTypes)
class IDLTypedefType(IDLType):
    """A use of a typedef'd type.

    This is a transparent wrapper: every predicate delegates to the aliased
    type, and complete() returns the inner type outright, so typedefs
    disappear from the type graph once completion has run.
    """
    def __init__(self, location, innerType, name):
        IDLType.__init__(self, location, name)
        self.inner = innerType
        self.builtin = False
    def __eq__(self, other):
        return isinstance(other, IDLTypedefType) and self.inner == other.inner
    def __str__(self):
        return self.name
    def nullable(self):
        return self.inner.nullable()
    def isPrimitive(self):
        return self.inner.isPrimitive()
    def isBoolean(self):
        return self.inner.isBoolean()
    def isNumeric(self):
        return self.inner.isNumeric()
    def isString(self):
        return self.inner.isString()
    def isByteString(self):
        return self.inner.isByteString()
    def isDOMString(self):
        return self.inner.isDOMString()
    def isUSVString(self):
        return self.inner.isUSVString()
    def isVoid(self):
        return self.inner.isVoid()
    def isSequence(self):
        return self.inner.isSequence()
    def isRecord(self):
        return self.inner.isRecord()
    def isDictionary(self):
        return self.inner.isDictionary()
    def isArrayBuffer(self):
        return self.inner.isArrayBuffer()
    def isArrayBufferView(self):
        return self.inner.isArrayBufferView()
    def isSharedArrayBuffer(self):
        return self.inner.isSharedArrayBuffer()
    def isTypedArray(self):
        return self.inner.isTypedArray()
    def isInterface(self):
        return self.inner.isInterface()
    def isCallbackInterface(self):
        return self.inner.isCallbackInterface()
    def isNonCallbackInterface(self):
        return self.inner.isNonCallbackInterface()
    def isComplete(self):
        # Always False: completion replaces this wrapper with the inner type.
        return False
    def complete(self, parentScope):
        if not self.inner.isComplete():
            self.inner = self.inner.complete(parentScope)
        assert self.inner.isComplete()
        # Return the aliased type itself; the typedef wrapper vanishes here.
        return self.inner
    # Do we need a resolveType impl?  I don't think it's particularly useful....
    def tag(self):
        return self.inner.tag()
    def unroll(self):
        return self.inner.unroll()
    def isDistinguishableFrom(self, other):
        return self.inner.isDistinguishableFrom(other)
    def _getDependentObjects(self):
        return self.inner._getDependentObjects()
class IDLTypedef(IDLObjectWithIdentifier):
    """A typedef declaration itself (contrast IDLTypedefType, which is a
    *use* of a typedef)."""

    def __init__(self, location, parentScope, innerType, name):
        ident = IDLUnresolvedIdentifier(location, name)
        IDLObjectWithIdentifier.__init__(self, location, parentScope, ident)
        self.innerType = innerType

    def __str__(self):
        return "Typedef %s %s" % (self.identifier.name, self.innerType)

    def finish(self, parentScope):
        # Resolve the aliased type once, if that hasn't happened yet.
        if not self.innerType.isComplete():
            self.innerType = self.innerType.complete(parentScope)

    def validate(self):
        pass

    def isTypedef(self):
        return True

    def addExtendedAttributes(self, attrs):
        # The spec allows no extended attributes on typedefs at all.
        if attrs:
            raise WebIDLError("There are no extended attributes that are "
                              "allowed on typedefs",
                              [attrs[0].location, self.location])

    def _getDependentObjects(self):
        return self.innerType._getDependentObjects()
class IDLWrapperType(IDLType):
    """A type that wraps a named IDL object: an interface (internal or
    external), a dictionary, or an enum.  The wrapped object is self.inner;
    its identifier is cached in self._identifier."""
    def __init__(self, location, inner):
        IDLType.__init__(self, location, inner.identifier.name)
        self.inner = inner
        self._identifier = inner.identifier
        self.builtin = False
    def __eq__(self, other):
        # Identity of the wrapped object's identifier decides equality.
        return (isinstance(other, IDLWrapperType) and
                self._identifier == other._identifier and
                self.builtin == other.builtin)
    def __str__(self):
        return str(self.name) + " (Wrapper)"
    def nullable(self):
        return False
    def isPrimitive(self):
        return False
    def isString(self):
        return False
    def isByteString(self):
        return False
    def isDOMString(self):
        return False
    def isUSVString(self):
        return False
    def isVoid(self):
        return False
    def isSequence(self):
        return False
    def isDictionary(self):
        return isinstance(self.inner, IDLDictionary)
    def isInterface(self):
        return (isinstance(self.inner, IDLInterface) or
                isinstance(self.inner, IDLExternalInterface))
    def isCallbackInterface(self):
        return self.isInterface() and self.inner.isCallback()
    def isNonCallbackInterface(self):
        return self.isInterface() and not self.inner.isCallback()
    def isEnum(self):
        return isinstance(self.inner, IDLEnum)
    def isSerializable(self):
        # Interfaces are serializable iff they have a jsonifier; enums
        # always are; dictionaries are iff all their members are.
        if self.isInterface():
            if self.inner.isExternal():
                return False
            return any(m.isMethod() and m.isJsonifier() for m in self.inner.members)
        elif self.isEnum():
            return True
        elif self.isDictionary():
            return all(m.type.isSerializable() for m in self.inner.members)
        else:
            raise WebIDLError("IDLWrapperType wraps type %s that we don't know if "
                              "is serializable" % type(self.inner), [self.location])
    def resolveType(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        self.inner.resolve(parentScope)
    def isComplete(self):
        return True
    def tag(self):
        if self.isInterface():
            return IDLType.Tags.interface
        elif self.isEnum():
            return IDLType.Tags.enum
        elif self.isDictionary():
            return IDLType.Tags.dictionary
        else:
            assert False
    def isDistinguishableFrom(self, other):
        # Encodes the WebIDL distinguishability rules for interface,
        # enum and dictionary types.
        if other.isPromise():
            return False
        if other.isUnion():
            # Just forward to the union; it'll deal
            return other.isDistinguishableFrom(self)
        assert self.isInterface() or self.isEnum() or self.isDictionary()
        if self.isEnum():
            return (other.isPrimitive() or other.isInterface() or other.isObject() or
                    other.isCallback() or other.isDictionary() or
                    other.isSequence() or other.isRecord() or other.isDate())
        if self.isDictionary() and other.nullable():
            return False
        if (other.isPrimitive() or other.isString() or other.isEnum() or
            other.isDate() or other.isSequence()):
            return True
        if self.isDictionary():
            return other.isNonCallbackInterface()
        assert self.isInterface()
        if other.isInterface():
            if other.isSpiderMonkeyInterface():
                # Just let |other| handle things
                return other.isDistinguishableFrom(self)
            assert self.isGeckoInterface() and other.isGeckoInterface()
            if self.inner.isExternal() or other.unroll().inner.isExternal():
                return self != other
            # Distinguishable only when the inheritance trees don't overlap
            # and at least one side is a non-callback interface.
            return (len(self.inner.interfacesBasedOnSelf &
                        other.unroll().inner.interfacesBasedOnSelf) == 0 and
                    (self.isNonCallbackInterface() or
                     other.isNonCallbackInterface()))
        if (other.isDictionary() or other.isCallback() or
            other.isRecord()):
            return self.isNonCallbackInterface()
        # Not much else |other| can be
        assert other.isObject()
        return False
    def isExposedInAllOf(self, exposureSet):
        if not self.isInterface():
            return True
        iface = self.inner
        if iface.isExternal():
            # Let's say true, though ideally we'd only do this when
            # exposureSet contains the primary global's name.
            return True
        return iface.exposureSet.issuperset(exposureSet)
    def _getDependentObjects(self):
        # NB: The codegen for an interface type depends on
        #  a) That the identifier is in fact an interface (as opposed to
        #     a dictionary or something else).
        #  b) The native type of the interface.
        #  If we depend on the interface object we will also depend on
        #  anything the interface depends on which is undesirable.  We
        #  considered implementing a dependency just on the interface type
        #  file, but then every modification to an interface would cause this
        #  to be regenerated which is still undesirable.  We decided not to
        #  depend on anything, reasoning that:
        #  1) Changing the concrete type of the interface requires modifying
        #     Bindings.conf, which is still a global dependency.
        #  2) Changing an interface to a dictionary (or vice versa) with the
        #     same identifier should be incredibly rare.
        #
        #  On the other hand, if our type is a dictionary, we should
        #  depend on it, because the member types of a dictionary
        #  affect whether a method taking the dictionary as an argument
        #  takes a JSContext* argument or not.
        if self.isDictionary():
            return set([self.inner])
        return set()
class IDLPromiseType(IDLParametrizedType):
    """A Promise<T> type; self.inner is T."""

    def __init__(self, location, innerType):
        IDLParametrizedType.__init__(self, location, "Promise", innerType)

    def __eq__(self, other):
        if not isinstance(other, IDLPromiseType):
            return False
        return self.promiseInnerType() == other.promiseInnerType()

    def __str__(self):
        return "%sPromise" % self.inner

    def isPromise(self):
        return True

    def promiseInnerType(self):
        return self.inner

    def tag(self):
        return IDLType.Tags.promise

    def complete(self, scope):
        # Completing a promise completes its inner type in place.
        self.inner = self.promiseInnerType().complete(scope)
        return self

    def unroll(self):
        # Deliberately stop at ourselves instead of unrolling the inner
        # type: that lets headers be added for both the promise and its
        # inner type as needed.
        return self

    def isDistinguishableFrom(self, other):
        # Promises are not distinguishable from anything.
        return False

    def isExposedInAllOf(self, exposureSet):
        # A promise is exposed wherever its (unrolled) inner type is.
        return self.promiseInnerType().unroll().isExposedInAllOf(exposureSet)
class IDLBuiltinType(IDLType):
    """A builtin WebIDL type: primitives, strings, any/object/date/void, and
    the SpiderMonkey-implemented buffer/typed-array "interfaces".

    Several predicates below rely on the declaration ORDER of the Types
    enum (see the inline IMPORTANT note): integers come first, then the
    other primitives ending at double, and the typed arrays form a
    contiguous run from Int8Array to Float64Array.
    """
    Types = enum(
        # The integer types
        'byte',
        'octet',
        'short',
        'unsigned_short',
        'long',
        'unsigned_long',
        'long_long',
        'unsigned_long_long',
        # Additional primitive types
        'boolean',
        'unrestricted_float',
        'float',
        'unrestricted_double',
        # IMPORTANT: "double" must be the last primitive type listed
        'double',
        # Other types
        'any',
        'domstring',
        'bytestring',
        'usvstring',
        'object',
        'date',
        'void',
        # Funny stuff
        'ArrayBuffer',
        'ArrayBufferView',
        'SharedArrayBuffer',
        'Int8Array',
        'Uint8Array',
        'Uint8ClampedArray',
        'Int16Array',
        'Uint16Array',
        'Int32Array',
        'Uint32Array',
        'Float32Array',
        'Float64Array'
    )

    # Maps each builtin type tag to the generic IDLType.Tags value used by
    # codegen; all buffer/typed-array types map to Tags.interface.
    TagLookup = {
        Types.byte: IDLType.Tags.int8,
        Types.octet: IDLType.Tags.uint8,
        Types.short: IDLType.Tags.int16,
        Types.unsigned_short: IDLType.Tags.uint16,
        Types.long: IDLType.Tags.int32,
        Types.unsigned_long: IDLType.Tags.uint32,
        Types.long_long: IDLType.Tags.int64,
        Types.unsigned_long_long: IDLType.Tags.uint64,
        Types.boolean: IDLType.Tags.bool,
        Types.unrestricted_float: IDLType.Tags.unrestricted_float,
        Types.float: IDLType.Tags.float,
        Types.unrestricted_double: IDLType.Tags.unrestricted_double,
        Types.double: IDLType.Tags.double,
        Types.any: IDLType.Tags.any,
        Types.domstring: IDLType.Tags.domstring,
        Types.bytestring: IDLType.Tags.bytestring,
        Types.usvstring: IDLType.Tags.usvstring,
        Types.object: IDLType.Tags.object,
        Types.date: IDLType.Tags.date,
        Types.void: IDLType.Tags.void,
        Types.ArrayBuffer: IDLType.Tags.interface,
        Types.ArrayBufferView: IDLType.Tags.interface,
        Types.SharedArrayBuffer: IDLType.Tags.interface,
        Types.Int8Array: IDLType.Tags.interface,
        Types.Uint8Array: IDLType.Tags.interface,
        Types.Uint8ClampedArray: IDLType.Tags.interface,
        Types.Int16Array: IDLType.Tags.interface,
        Types.Uint16Array: IDLType.Tags.interface,
        Types.Int32Array: IDLType.Tags.interface,
        Types.Uint32Array: IDLType.Tags.interface,
        Types.Float32Array: IDLType.Tags.interface,
        Types.Float64Array: IDLType.Tags.interface
    }

    def __init__(self, location, name, type):
        IDLType.__init__(self, location, name)
        self.builtin = True
        self._typeTag = type

    def isPrimitive(self):
        # Relies on "double" being the last primitive in the Types enum.
        return self._typeTag <= IDLBuiltinType.Types.double

    def isBoolean(self):
        return self._typeTag == IDLBuiltinType.Types.boolean

    def isNumeric(self):
        return self.isPrimitive() and not self.isBoolean()

    def isString(self):
        return (self._typeTag == IDLBuiltinType.Types.domstring or
                self._typeTag == IDLBuiltinType.Types.bytestring or
                self._typeTag == IDLBuiltinType.Types.usvstring)

    def isByteString(self):
        return self._typeTag == IDLBuiltinType.Types.bytestring

    def isDOMString(self):
        return self._typeTag == IDLBuiltinType.Types.domstring

    def isUSVString(self):
        return self._typeTag == IDLBuiltinType.Types.usvstring

    def isInteger(self):
        # Relies on the integer types being listed first in the Types enum.
        return self._typeTag <= IDLBuiltinType.Types.unsigned_long_long

    def isArrayBuffer(self):
        return self._typeTag == IDLBuiltinType.Types.ArrayBuffer

    def isArrayBufferView(self):
        return self._typeTag == IDLBuiltinType.Types.ArrayBufferView

    def isSharedArrayBuffer(self):
        return self._typeTag == IDLBuiltinType.Types.SharedArrayBuffer

    def isTypedArray(self):
        # Relies on the typed arrays being a contiguous run in the enum.
        return (self._typeTag >= IDLBuiltinType.Types.Int8Array and
                self._typeTag <= IDLBuiltinType.Types.Float64Array)

    def isInterface(self):
        # TypedArray things are interface types per the TypedArray spec,
        # but we handle them as builtins because SpiderMonkey implements
        # all of it internally.
        return (self.isArrayBuffer() or
                self.isArrayBufferView() or
                self.isSharedArrayBuffer() or
                self.isTypedArray())

    def isNonCallbackInterface(self):
        # All the interfaces we can be are non-callback
        return self.isInterface()

    def isFloat(self):
        return (self._typeTag == IDLBuiltinType.Types.float or
                self._typeTag == IDLBuiltinType.Types.double or
                self._typeTag == IDLBuiltinType.Types.unrestricted_float or
                self._typeTag == IDLBuiltinType.Types.unrestricted_double)

    def isUnrestricted(self):
        # Only meaningful for floating-point types.
        assert self.isFloat()
        return (self._typeTag == IDLBuiltinType.Types.unrestricted_float or
                self._typeTag == IDLBuiltinType.Types.unrestricted_double)

    def isSerializable(self):
        return self.isPrimitive() or self.isString() or self.isDate()

    def includesRestrictedFloat(self):
        return self.isFloat() and not self.isUnrestricted()

    def tag(self):
        return IDLBuiltinType.TagLookup[self._typeTag]

    def isDistinguishableFrom(self, other):
        # Encodes the WebIDL distinguishability table for builtin types.
        if other.isPromise():
            return False
        if other.isUnion():
            # Just forward to the union; it'll deal
            return other.isDistinguishableFrom(self)
        if self.isBoolean():
            return (other.isNumeric() or other.isString() or other.isEnum() or
                    other.isInterface() or other.isObject() or
                    other.isCallback() or other.isDictionary() or
                    other.isSequence() or other.isRecord() or other.isDate())
        if self.isNumeric():
            return (other.isBoolean() or other.isString() or other.isEnum() or
                    other.isInterface() or other.isObject() or
                    other.isCallback() or other.isDictionary() or
                    other.isSequence() or other.isRecord() or other.isDate())
        if self.isString():
            return (other.isPrimitive() or other.isInterface() or
                    other.isObject() or
                    other.isCallback() or other.isDictionary() or
                    other.isSequence() or other.isRecord() or other.isDate())
        if self.isAny():
            # Can't tell "any" apart from anything
            return False
        if self.isObject():
            return other.isPrimitive() or other.isString() or other.isEnum()
        if self.isDate():
            return (other.isPrimitive() or other.isString() or other.isEnum() or
                    other.isInterface() or other.isCallback() or
                    other.isDictionary() or other.isSequence() or
                    other.isRecord())
        if self.isVoid():
            return not other.isVoid()
        # Not much else we could be!
        assert self.isSpiderMonkeyInterface()
        # Like interfaces, but we know we're not a callback
        return (other.isPrimitive() or other.isString() or other.isEnum() or
                other.isCallback() or other.isDictionary() or
                other.isSequence() or other.isRecord() or other.isDate() or
                (other.isInterface() and (
                 # ArrayBuffer is distinguishable from everything
                 # that's not an ArrayBuffer or a callback interface
                 (self.isArrayBuffer() and not other.isArrayBuffer()) or
                 (self.isSharedArrayBuffer() and not other.isSharedArrayBuffer()) or
                 # ArrayBufferView is distinguishable from everything
                 # that's not an ArrayBufferView or typed array.
                 (self.isArrayBufferView() and not other.isArrayBufferView() and
                  not other.isTypedArray()) or
                 # Typed arrays are distinguishable from everything
                 # except ArrayBufferView and the same type of typed
                 # array
                 (self.isTypedArray() and not other.isArrayBufferView() and not
                  (other.isTypedArray() and other.name == self.name)))))

    def _getDependentObjects(self):
        return set()
# Singleton IDLBuiltinType instances for every builtin type, keyed by type
# tag.  All share the same synthetic "<builtin type>" location.  The pair
# list preserves the original construction order.
BuiltinTypes = {
    typeTag: IDLBuiltinType(BuiltinLocation("<builtin type>"), typeName, typeTag)
    for (typeTag, typeName) in [
        (IDLBuiltinType.Types.byte, "Byte"),
        (IDLBuiltinType.Types.octet, "Octet"),
        (IDLBuiltinType.Types.short, "Short"),
        (IDLBuiltinType.Types.unsigned_short, "UnsignedShort"),
        (IDLBuiltinType.Types.long, "Long"),
        (IDLBuiltinType.Types.unsigned_long, "UnsignedLong"),
        (IDLBuiltinType.Types.long_long, "LongLong"),
        (IDLBuiltinType.Types.unsigned_long_long, "UnsignedLongLong"),
        (IDLBuiltinType.Types.boolean, "Boolean"),
        (IDLBuiltinType.Types.float, "Float"),
        (IDLBuiltinType.Types.unrestricted_float, "UnrestrictedFloat"),
        (IDLBuiltinType.Types.double, "Double"),
        (IDLBuiltinType.Types.unrestricted_double, "UnrestrictedDouble"),
        (IDLBuiltinType.Types.any, "Any"),
        (IDLBuiltinType.Types.domstring, "String"),
        (IDLBuiltinType.Types.bytestring, "ByteString"),
        (IDLBuiltinType.Types.usvstring, "USVString"),
        (IDLBuiltinType.Types.object, "Object"),
        (IDLBuiltinType.Types.date, "Date"),
        (IDLBuiltinType.Types.void, "Void"),
        (IDLBuiltinType.Types.ArrayBuffer, "ArrayBuffer"),
        (IDLBuiltinType.Types.ArrayBufferView, "ArrayBufferView"),
        (IDLBuiltinType.Types.SharedArrayBuffer, "SharedArrayBuffer"),
        (IDLBuiltinType.Types.Int8Array, "Int8Array"),
        (IDLBuiltinType.Types.Uint8Array, "Uint8Array"),
        (IDLBuiltinType.Types.Uint8ClampedArray, "Uint8ClampedArray"),
        (IDLBuiltinType.Types.Int16Array, "Int16Array"),
        (IDLBuiltinType.Types.Uint16Array, "Uint16Array"),
        (IDLBuiltinType.Types.Int32Array, "Int32Array"),
        (IDLBuiltinType.Types.Uint32Array, "Uint32Array"),
        (IDLBuiltinType.Types.Float32Array, "Float32Array"),
        (IDLBuiltinType.Types.Float64Array, "Float64Array"),
    ]
}
# Inclusive (min, max) value ranges for each WebIDL integer type.
integerTypeSizes = {
    IDLBuiltinType.Types.byte: (-128, 127),
    IDLBuiltinType.Types.octet:  (0, 255),
    IDLBuiltinType.Types.short: (-32768, 32767),
    IDLBuiltinType.Types.unsigned_short: (0, 65535),
    IDLBuiltinType.Types.long: (-2147483648, 2147483647),
    IDLBuiltinType.Types.unsigned_long: (0, 4294967295),
    IDLBuiltinType.Types.long_long: (-9223372036854775808, 9223372036854775807),
    IDLBuiltinType.Types.unsigned_long_long: (0, 18446744073709551615)
}
def matchIntegerValueToType(value):
    """Return a builtin integer type whose range contains *value*, or None
    if the value fits no WebIDL integer type.

    NOTE(review): integerTypeSizes is a plain dict, so when several types
    match, which one is returned depends on unspecified dict iteration
    order — confirm callers don't rely on a particular choice.
    """
    # Renamed the loop variables: the originals shadowed the builtins
    # 'type', 'min' and 'max'.
    for typeTag, (minValue, maxValue) in integerTypeSizes.items():
        if minValue <= value <= maxValue:
            return BuiltinTypes[typeTag]
    return None
class NoCoercionFoundError(WebIDLError):
    """
    A class we use to indicate generic coercion failures because none of the
    types worked out in IDLValue.coerceToType.  Callers that try multiple
    candidate types swallow this, but re-raise other WebIDLErrors.
    """
class IDLValue(IDLObject):
    """A constant/default value together with its IDL type, e.g. an integer
    or string literal appearing as a dictionary member default."""
    def __init__(self, location, type, value):
        IDLObject.__init__(self, location)
        self.type = type
        assert isinstance(type, IDLType)
        self.value = value
    def coerceToType(self, type, location):
        """Return an IDLValue of this value coerced to *type*, or raise a
        WebIDLError (NoCoercionFoundError when no branch applies at all).
        Branch order below is significant."""
        if type == self.type:
            return self # Nothing to do
        # We first check for unions to ensure that even if the union is nullable
        # we end up with the right flat member type, not the union's type.
        if type.isUnion():
            # We use the flat member types here, because if we have a nullable
            # member type, or a nested union, we want the type the value
            # actually coerces to, not the nullable or nested union type.
            for subtype in type.unroll().flatMemberTypes:
                try:
                    coercedValue = self.coerceToType(subtype, location)
                    # Create a new IDLValue to make sure that we have the
                    # correct float/double type. This is necessary because we
                    # use the value's type when it is a default value of a
                    # union, and the union cares about the exact float type.
                    return IDLValue(self.location, subtype, coercedValue.value)
                except Exception as e:
                    # Make sure to propagate out WebIDLErrors that are not the
                    # generic "hey, we could not coerce to this type at all"
                    # exception, because those are specific "coercion failed for
                    # reason X" exceptions. Note that we want to swallow
                    # non-WebIDLErrors here, because those can just happen if
                    # "type" is not something that can have a default value at
                    # all.
                    if (isinstance(e, WebIDLError) and
                        not isinstance(e, NoCoercionFoundError)):
                        raise e
        # If the type allows null, rerun this matching on the inner type, except
        # nullable enums.  We handle those specially, because we want our
        # default string values to stay strings even when assigned to a nullable
        # enum.
        elif type.nullable() and not type.isEnum():
            innerValue = self.coerceToType(type.inner, location)
            return IDLValue(self.location, type, innerValue.value)
        elif self.type.isInteger() and type.isInteger():
            # We're both integer types.  See if we fit.
            (min, max) = integerTypeSizes[type._typeTag]
            if self.value <= max and self.value >= min:
                # Promote
                return IDLValue(self.location, type, self.value)
            else:
                raise WebIDLError("Value %s is out of range for type %s." %
                                  (self.value, type), [location])
        elif self.type.isInteger() and type.isFloat():
            # Convert an integer literal into float, but only within the
            # range a single-precision float represents exactly.
            if -2**24 <= self.value <= 2**24:
                return IDLValue(self.location, type, float(self.value))
            else:
                raise WebIDLError("Converting value %s to %s will lose precision." %
                                  (self.value, type), [location])
        elif self.type.isString() and type.isEnum():
            # Just keep our string, but make sure it's a valid value for this enum
            enum = type.unroll().inner
            if self.value not in enum.values():
                raise WebIDLError("'%s' is not a valid default value for enum %s"
                                  % (self.value, enum.identifier.name),
                                  [location, enum.location])
            return self
        elif self.type.isFloat() and type.isFloat():
            # Infinities and NaN may only flow into unrestricted targets.
            if (not type.isUnrestricted() and
                (self.value == float("inf") or self.value == float("-inf") or
                 math.isnan(self.value))):
                raise WebIDLError("Trying to convert unrestricted value %s to non-unrestricted"
                                  % self.value, [location])
            return IDLValue(self.location, type, self.value)
        elif self.type.isString() and type.isUSVString():
            # Allow USVStrings to use default value just like
            # DOMString.  No coercion is required in this case as Codegen.py
            # treats USVString just like DOMString, but with an
            # extra normalization step.
            assert self.type.isDOMString()
            return self
        elif self.type.isString() and type.isByteString():
            # Allow ByteStrings to use a default value like DOMString.
            # No coercion is required as Codegen.py will handle the
            # extra steps. We want to make sure that our string contains
            # only valid characters, so we check that here.
            valid_ascii_lit = " " + string.ascii_letters + string.digits + string.punctuation
            for idx, c in enumerate(self.value):
                if c not in valid_ascii_lit:
                    raise WebIDLError("Coercing this string literal %s to a ByteString is not supported yet. "
                                      "Coercion failed due to an unsupported byte %d at index %d."
                                      % (self.value.__repr__(), ord(c), idx), [location])
            return IDLValue(self.location, type, self.value)
        raise NoCoercionFoundError("Cannot coerce type %s to type %s." %
                                   (self.type, type), [location])
    def _getDependentObjects(self):
        return set()
class IDLNullValue(IDLObject):
    """The default value 'null'."""

    def __init__(self, location):
        IDLObject.__init__(self, location)
        self.type = None
        self.value = None

    def coerceToType(self, type, location):
        # null is acceptable for nullable types, unions containing a
        # nullable or dictionary member, dictionaries, and 'any'.
        nullOk = (isinstance(type, IDLNullableType) or
                  (type.isUnion() and
                   (type.hasNullableType or type.hasDictionaryType())) or
                  type.isDictionary() or
                  type.isAny())
        if not nullOk:
            raise WebIDLError("Cannot coerce null value to type %s." % type,
                              [location])

        nullValue = IDLNullValue(self.location)
        if type.isUnion() and not type.nullable() and type.hasDictionaryType():
            # We're actually a default value for the union's dictionary
            # member; take on that member's type rather than the union's.
            for memberType in type.flatMemberTypes:
                if memberType.isDictionary():
                    nullValue.type = memberType
                    return nullValue
        nullValue.type = type
        return nullValue

    def _getDependentObjects(self):
        return set()
class IDLEmptySequenceValue(IDLObject):
    """The default value '[]' for sequence-typed members/arguments."""

    def __init__(self, location):
        IDLObject.__init__(self, location)
        self.type = None
        self.value = None

    def coerceToType(self, type, location):
        """Coerce to *type*, which must be (or, for unions, contain) a
        sequence type; raises WebIDLError otherwise."""
        if type.isUnion():
            # We use the flat member types here, because if we have a nullable
            # member type, or a nested union, we want the type the value
            # actually coerces to, not the nullable or nested union type.
            for subtype in type.unroll().flatMemberTypes:
                try:
                    return self.coerceToType(subtype, location)
                except WebIDLError:
                    # Bug fix: this was a bare 'except:', which also swallowed
                    # unrelated programming errors (AttributeError, etc.).
                    # Only coercion failures should make us try the next
                    # member type.
                    pass

        if not type.isSequence():
            raise WebIDLError("Cannot coerce empty sequence value to type %s." % type,
                              [location])

        emptySequenceValue = IDLEmptySequenceValue(self.location)
        emptySequenceValue.type = type
        return emptySequenceValue

    def _getDependentObjects(self):
        return set()
class IDLUndefinedValue(IDLObject):
    """The value 'undefined'; only coercible to 'any'."""

    def __init__(self, location):
        IDLObject.__init__(self, location)
        self.type = None
        self.value = None

    def coerceToType(self, type, location):
        if not type.isAny():
            raise WebIDLError("Cannot coerce undefined value to type %s." % type,
                              [location])
        coerced = IDLUndefinedValue(self.location)
        coerced.type = type
        return coerced

    def _getDependentObjects(self):
        return set()
class IDLInterfaceMember(IDLObjectWithIdentifier, IDLExposureMixins):
    """Base class for everything that can appear inside an interface body:
    constants, attributes, methods, and maplike/setlike/iterable
    declarations."""

    Tags = enum(
        'Const',
        'Attr',
        'Method',
        'MaplikeOrSetlike',
        'Iterable'
    )

    Special = enum(
        'Static',
        'Stringifier'
    )

    # Legal values for the 'affects' / 'dependsOn' attributes.
    # NOTE(review): self.affects, self.dependsOn and self.aliases are not
    # initialized in this __init__; validate()/_addAlias assume subclasses
    # (or later setup) have set them — confirm.
    AffectsValues = ("Nothing", "Everything")
    DependsOnValues = ("Nothing", "DOMState", "DeviceState", "Everything")

    def __init__(self, location, identifier, tag, extendedAttrDict=None):
        IDLObjectWithIdentifier.__init__(self, location, None, identifier)
        IDLExposureMixins.__init__(self, location)
        self.tag = tag
        if extendedAttrDict is None:
            self._extendedAttrDict = {}
        else:
            self._extendedAttrDict = extendedAttrDict

    def isMethod(self):
        return self.tag == IDLInterfaceMember.Tags.Method

    def isAttr(self):
        return self.tag == IDLInterfaceMember.Tags.Attr

    def isConst(self):
        return self.tag == IDLInterfaceMember.Tags.Const

    def isMaplikeOrSetlikeOrIterable(self):
        return (self.tag == IDLInterfaceMember.Tags.MaplikeOrSetlike or
                self.tag == IDLInterfaceMember.Tags.Iterable)

    def isMaplikeOrSetlike(self):
        return self.tag == IDLInterfaceMember.Tags.MaplikeOrSetlike

    def addExtendedAttributes(self, attrs):
        # Record each attribute; a no-value attribute is stored as True.
        for attr in attrs:
            self.handleExtendedAttribute(attr)
            attrlist = attr.listValue()
            self._extendedAttrDict[attr.identifier()] = attrlist if len(attrlist) else True

    def handleExtendedAttribute(self, attr):
        # Hook for subclasses; the base class accepts anything silently.
        pass

    def getExtendedAttribute(self, name):
        return self._extendedAttrDict.get(name, None)

    def finish(self, scope):
        # We had better be exposed _somewhere_.  (This used to be a bare
        # Python-2 debug 'print' before the assert; the assertion message
        # now carries the same diagnostic information.)
        assert len(self._exposureGlobalNames) != 0, self.identifier.name
        IDLExposureMixins.finish(self, scope)

    def validate(self):
        if self.isAttr() or self.isMethod():
            if self.affects == "Everything" and self.dependsOn != "Everything":
                raise WebIDLError("Interface member is flagged as affecting "
                                  "everything but not depending on everything. "
                                  "That seems rather unlikely.",
                                  [self.location])

        if self.getExtendedAttribute("NewObject"):
            if self.dependsOn == "Nothing" or self.dependsOn == "DOMState":
                raise WebIDLError("A [NewObject] method is not idempotent, "
                                  "so it has to depend on something other than DOM state.",
                                  [self.location])
            if (self.getExtendedAttribute("Cached") or
                self.getExtendedAttribute("StoreInSlot")):
                raise WebIDLError("A [NewObject] attribute shouldnt be "
                                  "[Cached] or [StoreInSlot], since the point "
                                  "of those is to keep returning the same "
                                  "thing across multiple calls, which is not "
                                  "what [NewObject] does.",
                                  [self.location])

    def _setDependsOn(self, dependsOn):
        if self.dependsOn != "Everything":
            raise WebIDLError("Trying to specify multiple different DependsOn, "
                              "Pure, or Constant extended attributes for "
                              "attribute", [self.location])
        if dependsOn not in IDLInterfaceMember.DependsOnValues:
            raise WebIDLError("Invalid [DependsOn=%s] on attribute" % dependsOn,
                              [self.location])
        self.dependsOn = dependsOn

    def _setAffects(self, affects):
        if self.affects != "Everything":
            raise WebIDLError("Trying to specify multiple different Affects, "
                              "Pure, or Constant extended attributes for "
                              "attribute", [self.location])
        if affects not in IDLInterfaceMember.AffectsValues:
            # Bug fix: this previously interpolated the unrelated (and here
            # unbound) name 'dependsOn', raising NameError instead of the
            # intended diagnostic.
            raise WebIDLError("Invalid [Affects=%s] on attribute" % affects,
                              [self.location])
        self.affects = affects

    def _addAlias(self, alias):
        if alias in self.aliases:
            raise WebIDLError("Duplicate [Alias=%s] on attribute" % alias,
                              [self.location])
        self.aliases.append(alias)
class IDLMaplikeOrSetlikeOrIterableBase(IDLInterfaceMember):
    """Shared base for maplike/setlike/iterable interface declarations.

    Holds the declared key/value types and the machinery used to expand a
    declaration into the synthetic members (entries/keys/values/forEach,
    has/get/set/..., size) that Codegen expects to find as ordinary
    interface members.
    """
    def __init__(self, location, identifier, ifaceType, keyType, valueType, ifaceKind):
        IDLInterfaceMember.__init__(self, location, identifier, ifaceKind)
        if keyType is not None:
            assert isinstance(keyType, IDLType)
        else:
            # A value iterator (iterable<V>) has no key type, but must have a
            # value type.
            assert valueType is not None
        assert ifaceType in ['maplike', 'setlike', 'iterable']
        if valueType is not None:
            assert isinstance(valueType, IDLType)
        self.keyType = keyType
        self.valueType = valueType
        self.maplikeOrSetlikeOrIterableType = ifaceType
        # Names claimed by the expanded members; checked against the rest of
        # the interface in checkCollisions().
        self.disallowedMemberNames = []
        self.disallowedNonMethodNames = []
    def isMaplike(self):
        """True if this is a maplike declaration."""
        return self.maplikeOrSetlikeOrIterableType == "maplike"
    def isSetlike(self):
        """True if this is a setlike declaration."""
        return self.maplikeOrSetlikeOrIterableType == "setlike"
    def isIterable(self):
        """True if this is an iterable declaration."""
        return self.maplikeOrSetlikeOrIterableType == "iterable"
    def hasKeyType(self):
        return self.keyType is not None
    def hasValueType(self):
        return self.valueType is not None
    def checkCollisions(self, members, isAncestor):
        """Raise if any of `members` collides with a reserved expanded name."""
        for member in members:
            # Check that there are no disallowed members
            if (member.identifier.name in self.disallowedMemberNames and
                not ((member.isMethod() and member.isMaplikeOrSetlikeOrIterableMethod()) or
                     (member.isAttr() and member.isMaplikeOrSetlikeAttr()))):
                raise WebIDLError("Member '%s' conflicts "
                                  "with reserved %s name." %
                                  (member.identifier.name,
                                   self.maplikeOrSetlikeOrIterableType),
                                  [self.location, member.location])
            # Check that there are no disallowed non-method members.
            # Ancestor members are always disallowed here; own members
            # are disallowed only if they're non-methods.
            if ((isAncestor or member.isAttr() or member.isConst()) and
                member.identifier.name in self.disallowedNonMethodNames):
                raise WebIDLError("Member '%s' conflicts "
                                  "with reserved %s method." %
                                  (member.identifier.name,
                                   self.maplikeOrSetlikeOrIterableType),
                                  [self.location, member.location])
    def addMethod(self, name, members, allowExistingOperations, returnType, args=[],
                  chromeOnly=False, isPure=False, affectsNothing=False, newObject=False,
                  isIteratorAlias=False):
        """
        Create an IDLMethod based on the parameters passed in.

        - members is the member list to add this function to, since this is
          called during the member expansion portion of interface object
          building.

        - chromeOnly is only True for read-only js implemented classes, to
          implement underscore prefixed convenience functions which would
          otherwise not be available, unlike the case of C++ bindings.

        - isPure is only True for idempotent functions, so it is not valid for
          things like keys, values, etc. that return a new object every time.

        - affectsNothing means that nothing changes due to this method, which
          affects JIT optimization behavior

        - newObject means the method creates and returns a new object.

        """
        # Only add name to lists for collision checks if it's not chrome
        # only.
        if chromeOnly:
            name = "__" + name
        else:
            if not allowExistingOperations:
                self.disallowedMemberNames.append(name)
            else:
                self.disallowedNonMethodNames.append(name)
        # If allowExistingOperations is True, and another operation exists
        # with the same name as the one we're trying to add, don't add the
        # maplike/setlike operation. However, if the operation is static,
        # then fail by way of creating the function, which will cause a
        # naming conflict, per the spec.
        if allowExistingOperations:
            for m in members:
                if m.identifier.name == name and m.isMethod() and not m.isStatic():
                    return
        method = IDLMethod(self.location,
                           IDLUnresolvedIdentifier(self.location, name, allowDoubleUnderscore=chromeOnly),
                           returnType, args, maplikeOrSetlikeOrIterable=self)
        # We need to be able to throw from declaration methods
        method.addExtendedAttributes(
            [IDLExtendedAttribute(self.location, ("Throws",))])
        if chromeOnly:
            method.addExtendedAttributes(
                [IDLExtendedAttribute(self.location, ("ChromeOnly",))])
        if isPure:
            method.addExtendedAttributes(
                [IDLExtendedAttribute(self.location, ("Pure",))])
        # Following attributes are used for keys/values/entries. Can't mark
        # them pure, since they return a new object each time they are run.
        if affectsNothing:
            method.addExtendedAttributes(
                [IDLExtendedAttribute(self.location, ("DependsOn", "Everything")),
                 IDLExtendedAttribute(self.location, ("Affects", "Nothing"))])
        if newObject:
            method.addExtendedAttributes(
                [IDLExtendedAttribute(self.location, ("NewObject",))])
        if isIteratorAlias:
            method.addExtendedAttributes(
                [IDLExtendedAttribute(self.location, ("Alias", "@@iterator"))])
        members.append(method)
    def resolve(self, parentScope):
        """Resolve the declared key/value types in `parentScope`."""
        if self.keyType:
            self.keyType.resolveType(parentScope)
        if self.valueType:
            self.valueType.resolveType(parentScope)
    def finish(self, scope):
        """Complete the key/value types once all definitions are known."""
        IDLInterfaceMember.finish(self, scope)
        if self.keyType and not self.keyType.isComplete():
            t = self.keyType.complete(scope)
            assert not isinstance(t, IDLUnresolvedType)
            assert not isinstance(t, IDLTypedefType)
            assert not isinstance(t.name, IDLUnresolvedIdentifier)
            self.keyType = t
        if self.valueType and not self.valueType.isComplete():
            t = self.valueType.complete(scope)
            assert not isinstance(t, IDLUnresolvedType)
            assert not isinstance(t, IDLTypedefType)
            assert not isinstance(t.name, IDLUnresolvedIdentifier)
            self.valueType = t
    def validate(self):
        IDLInterfaceMember.validate(self)
    def handleExtendedAttribute(self, attr):
        IDLInterfaceMember.handleExtendedAttribute(self, attr)
    def _getDependentObjects(self):
        # The key/value types are the only external objects we depend on.
        deps = set()
        if self.keyType:
            deps.add(self.keyType)
        if self.valueType:
            deps.add(self.valueType)
        return deps
    def getForEachArguments(self):
        """Return the (callback, optional thisArg) argument list for forEach()."""
        return [IDLArgument(self.location,
                            IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"),
                                                    "callback"),
                            BuiltinTypes[IDLBuiltinType.Types.object]),
                IDLArgument(self.location,
                            IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"),
                                                    "thisArg"),
                            BuiltinTypes[IDLBuiltinType.Types.any],
                            optional=True)]
# Iterable adds ES6 iterator style functions and traits
# (keys/values/entries/@@iterator) to an interface.
class IDLIterable(IDLMaplikeOrSetlikeOrIterableBase):
    """An `iterable<K, V>` / `iterable<V>` declaration on an interface."""
    def __init__(self, location, identifier, keyType, valueType=None, scope=None):
        IDLMaplikeOrSetlikeOrIterableBase.__init__(self, location, identifier,
                                                   "iterable", keyType, valueType,
                                                   IDLInterfaceMember.Tags.Iterable)
        # Filled in later (the iterator interface type for pair iterators).
        self.iteratorType = None
    def __str__(self):
        return "declared iterable with key '%s' and value '%s'" % (self.keyType, self.valueType)
    def expand(self, members, isJSImplemented):
        """
        In order to take advantage of all of the method machinery in Codegen,
        we generate our functions as if they were part of the interface
        specification during parsing.
        """
        # We only need to add entries/keys/values here if we're a pair iterator.
        # Value iterators just copy these from %ArrayPrototype% instead.
        if not self.isPairIterator():
            return
        # object entries()
        self.addMethod("entries", members, False, self.iteratorType,
                       affectsNothing=True, newObject=True,
                       isIteratorAlias=True)
        # object keys()
        self.addMethod("keys", members, False, self.iteratorType,
                       affectsNothing=True, newObject=True)
        # object values()
        self.addMethod("values", members, False, self.iteratorType,
                       affectsNothing=True, newObject=True)
        # void forEach(callback(valueType, keyType), optional any thisArg)
        self.addMethod("forEach", members, False,
                       BuiltinTypes[IDLBuiltinType.Types.void],
                       self.getForEachArguments())
    def isValueIterator(self):
        """True for iterable<V> (no key type)."""
        return not self.isPairIterator()
    def isPairIterator(self):
        """True for iterable<K, V> (has a key type)."""
        return self.hasKeyType()
# MaplikeOrSetlike adds ES6 map-or-set-like traits to an interface.
class IDLMaplikeOrSetlike(IDLMaplikeOrSetlikeOrIterableBase):
    """A `maplike<K, V>` or `setlike<K>` declaration on an interface."""
    def __init__(self, location, identifier, maplikeOrSetlikeType,
                 readonly, keyType, valueType):
        IDLMaplikeOrSetlikeOrIterableBase.__init__(self, location, identifier, maplikeOrSetlikeType,
                                                   keyType, valueType, IDLInterfaceMember.Tags.MaplikeOrSetlike)
        self.readonly = readonly
        self.slotIndices = None
        # When generating JSAPI access code, we need to know the backing object
        # type prefix to create the correct function. Generate here for reuse.
        if self.isMaplike():
            self.prefix = 'Map'
        elif self.isSetlike():
            self.prefix = 'Set'
    def __str__(self):
        return "declared '%s' with key '%s'" % (self.maplikeOrSetlikeOrIterableType, self.keyType)
    def expand(self, members, isJSImplemented):
        """
        In order to take advantage of all of the method machinery in Codegen,
        we generate our functions as if they were part of the interface
        specification during parsing.
        """
        # Both maplike and setlike have a size attribute
        members.append(IDLAttribute(self.location,
                                    IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"), "size"),
                                    BuiltinTypes[IDLBuiltinType.Types.unsigned_long],
                                    True,
                                    maplikeOrSetlike=self))
        self.reserved_ro_names = ["size"]
        self.disallowedMemberNames.append("size")
        # object entries()
        self.addMethod("entries", members, False, BuiltinTypes[IDLBuiltinType.Types.object],
                       affectsNothing=True, isIteratorAlias=self.isMaplike())
        # object keys()
        self.addMethod("keys", members, False, BuiltinTypes[IDLBuiltinType.Types.object],
                       affectsNothing=True)
        # object values()
        self.addMethod("values", members, False, BuiltinTypes[IDLBuiltinType.Types.object],
                       affectsNothing=True, isIteratorAlias=self.isSetlike())
        # void forEach(callback(valueType, keyType), thisVal)
        self.addMethod("forEach", members, False, BuiltinTypes[IDLBuiltinType.Types.void],
                       self.getForEachArguments())
        # Local helper: the "key" argument shared by has/get/delete/set/add.
        def getKeyArg():
            return IDLArgument(self.location,
                               IDLUnresolvedIdentifier(self.location, "key"),
                               self.keyType)
        # boolean has(keyType key)
        self.addMethod("has", members, False, BuiltinTypes[IDLBuiltinType.Types.boolean],
                       [getKeyArg()], isPure=True)
        if not self.readonly:
            # void clear()
            self.addMethod("clear", members, True, BuiltinTypes[IDLBuiltinType.Types.void],
                           [])
            # boolean delete(keyType key)
            self.addMethod("delete", members, True,
                           BuiltinTypes[IDLBuiltinType.Types.boolean], [getKeyArg()])
        # Always generate underscored functions (e.g. __add, __clear) for js
        # implemented interfaces as convenience functions.
        if isJSImplemented:
            # void clear()
            self.addMethod("clear", members, True, BuiltinTypes[IDLBuiltinType.Types.void],
                           [], chromeOnly=True)
            # boolean delete(keyType key)
            self.addMethod("delete", members, True,
                           BuiltinTypes[IDLBuiltinType.Types.boolean], [getKeyArg()],
                           chromeOnly=True)
        if self.isSetlike():
            if not self.readonly:
                # Add returns the set object it just added to.
                # object add(keyType key)
                self.addMethod("add", members, True,
                               BuiltinTypes[IDLBuiltinType.Types.object], [getKeyArg()])
            if isJSImplemented:
                self.addMethod("add", members, True,
                               BuiltinTypes[IDLBuiltinType.Types.object], [getKeyArg()],
                               chromeOnly=True)
            return
        # If we get this far, we're a maplike declaration.
        # valueType get(keyType key)
        #
        # Note that instead of the value type, we're using any here. The
        # validity checks should happen as things are inserted into the map,
        # and using any as the return type makes code generation much simpler.
        #
        # TODO: Bug 1155340 may change this to use specific type to provide
        # more info to JIT.
        self.addMethod("get", members, False, BuiltinTypes[IDLBuiltinType.Types.any],
                       [getKeyArg()], isPure=True)
        # Local helper: the "value" argument for set().
        def getValueArg():
            return IDLArgument(self.location,
                               IDLUnresolvedIdentifier(self.location, "value"),
                               self.valueType)
        if not self.readonly:
            self.addMethod("set", members, True, BuiltinTypes[IDLBuiltinType.Types.object],
                           [getKeyArg(), getValueArg()])
        if isJSImplemented:
            self.addMethod("set", members, True, BuiltinTypes[IDLBuiltinType.Types.object],
                           [getKeyArg(), getValueArg()], chromeOnly=True)
class IDLConst(IDLInterfaceMember):
    """A `const` member of an interface.

    Constants must be of primitive or string type; their value is coerced
    to the declared type in finish().
    """
    def __init__(self, location, identifier, type, value):
        IDLInterfaceMember.__init__(self, location, identifier,
                                    IDLInterfaceMember.Tags.Const)
        assert isinstance(type, IDLType)
        if type.isDictionary():
            raise WebIDLError("A constant cannot be of a dictionary type",
                              [self.location])
        if type.isRecord():
            raise WebIDLError("A constant cannot be of a record type",
                              [self.location])
        self.type = type
        self.value = value
        if identifier.name == "prototype":
            raise WebIDLError("The identifier of a constant must not be 'prototype'",
                              [location])
    def __str__(self):
        return "'%s' const '%s'" % (self.type, self.identifier)
    def finish(self, scope):
        """Complete the declared type and coerce the value to it."""
        IDLInterfaceMember.finish(self, scope)
        if not self.type.isComplete():
            type = self.type.complete(scope)
            if not type.isPrimitive() and not type.isString():
                locations = [self.type.location, type.location]
                # If the completed type wraps an inner type (e.g. a nullable),
                # point the error at that location too.  Narrowed from a bare
                # `except:` so unrelated errors (KeyboardInterrupt, etc.)
                # still propagate.
                try:
                    locations.append(type.inner.location)
                except AttributeError:
                    pass
                raise WebIDLError("Incorrect type for constant", locations)
            self.type = type
        # The value might not match the type
        coercedValue = self.value.coerceToType(self.type, self.location)
        assert coercedValue
        self.value = coercedValue
    def validate(self):
        IDLInterfaceMember.validate(self)
    def handleExtendedAttribute(self, attr):
        """Consts accept only a small whitelist of extended attributes."""
        identifier = attr.identifier()
        if identifier == "Exposed":
            convertExposedAttrToGlobalNameSet(attr, self._exposureGlobalNames)
        elif (identifier == "Pref" or
              identifier == "ChromeOnly" or
              identifier == "Func" or
              identifier == "SecureContext"):
            # Known attributes that we don't need to do anything with here
            pass
        else:
            raise WebIDLError("Unknown extended attribute %s on constant" % identifier,
                              [attr.location])
        IDLInterfaceMember.handleExtendedAttribute(self, attr)
    def _getDependentObjects(self):
        return set([self.type, self.value])
class IDLAttribute(IDLInterfaceMember):
    """An attribute member of an interface.

    Tracks readonly/inherit/static/stringifier flags, the [Affects]/
    [DependsOn] categories, and validates the large set of extended
    attributes that may be applied to attributes.
    """
    def __init__(self, location, identifier, type, readonly, inherit=False,
                 static=False, stringifier=False, maplikeOrSetlike=None,
                 extendedAttrDict=None, navigatorObjectGetter=False):
        IDLInterfaceMember.__init__(self, location, identifier,
                                    IDLInterfaceMember.Tags.Attr,
                                    extendedAttrDict=extendedAttrDict)
        assert isinstance(type, IDLType)
        self.type = type
        self.readonly = readonly
        self.inherit = inherit
        self._static = static
        self.lenientThis = False
        self._unforgeable = False
        self.stringifier = stringifier
        self.enforceRange = False
        self.clamp = False
        self.slotIndices = None
        # Non-None when this attribute was synthesized from a maplike/setlike
        # declaration (e.g. the "size" attribute).
        assert maplikeOrSetlike is None or isinstance(maplikeOrSetlike, IDLMaplikeOrSetlike)
        self.maplikeOrSetlike = maplikeOrSetlike
        # Default optimization categories; narrowed by extended attributes.
        self.dependsOn = "Everything"
        self.affects = "Everything"
        self.navigatorObjectGetter = navigatorObjectGetter
        if static and identifier.name == "prototype":
            raise WebIDLError("The identifier of a static attribute must not be 'prototype'",
                              [location])
        if readonly and inherit:
            raise WebIDLError("An attribute cannot be both 'readonly' and 'inherit'",
                              [self.location])
    def isStatic(self):
        return self._static
    def forceStatic(self):
        # Used to turn an attribute static after construction.
        self._static = True
    def __str__(self):
        return "'%s' attribute '%s'" % (self.type, self.identifier)
    def finish(self, scope):
        """Complete the type and enforce type restrictions on attributes."""
        IDLInterfaceMember.finish(self, scope)
        if not self.type.isComplete():
            t = self.type.complete(scope)
            assert not isinstance(t, IDLUnresolvedType)
            assert not isinstance(t, IDLTypedefType)
            assert not isinstance(t.name, IDLUnresolvedIdentifier)
            self.type = t
        if self.type.isDictionary() and not self.getExtendedAttribute("Cached"):
            raise WebIDLError("An attribute cannot be of a dictionary type",
                              [self.location])
        if self.type.isSequence() and not self.getExtendedAttribute("Cached"):
            raise WebIDLError("A non-cached attribute cannot be of a sequence "
                              "type", [self.location])
        if self.type.isRecord() and not self.getExtendedAttribute("Cached"):
            raise WebIDLError("A non-cached attribute cannot be of a record "
                              "type", [self.location])
        if self.type.isUnion():
            # Unions must be checked recursively for disallowed member types.
            for f in self.type.unroll().flatMemberTypes:
                if f.isDictionary():
                    raise WebIDLError("An attribute cannot be of a union "
                                      "type if one of its member types (or "
                                      "one of its member types's member "
                                      "types, and so on) is a dictionary "
                                      "type", [self.location, f.location])
                if f.isSequence():
                    raise WebIDLError("An attribute cannot be of a union "
                                      "type if one of its member types (or "
                                      "one of its member types's member "
                                      "types, and so on) is a sequence "
                                      "type", [self.location, f.location])
                if f.isRecord():
                    raise WebIDLError("An attribute cannot be of a union "
                                      "type if one of its member types (or "
                                      "one of its member types's member "
                                      "types, and so on) is a record "
                                      "type", [self.location, f.location])
        if not self.type.isInterface() and self.getExtendedAttribute("PutForwards"):
            raise WebIDLError("An attribute with [PutForwards] must have an "
                              "interface type as its type", [self.location])
        if (not self.type.isInterface() and
            self.getExtendedAttribute("SameObject")):
            raise WebIDLError("An attribute with [SameObject] must have an "
                              "interface type as its type", [self.location])
        if self.type.isPromise() and not self.readonly:
            raise WebIDLError("Promise-returning attributes must be readonly",
                              [self.location])
    def validate(self):
        """Validate [Cached]/[StoreInSlot]/[Frozen]/exposure/[CEReactions]."""
        # Helper: walk a (possibly nested) type looking for a dictionary with
        # a [ChromeOnly] member; returns (found, location-of-member).
        def typeContainsChromeOnlyDictionaryMember(type):
            if (type.nullable() or
                type.isSequence() or
                type.isRecord()):
                return typeContainsChromeOnlyDictionaryMember(type.inner)
            if type.isUnion():
                for memberType in type.flatMemberTypes:
                    (contains, location) = typeContainsChromeOnlyDictionaryMember(memberType)
                    if contains:
                        return (True, location)
            if type.isDictionary():
                dictionary = type.inner
                while dictionary:
                    (contains, location) = dictionaryContainsChromeOnlyMember(dictionary)
                    if contains:
                        return (True, location)
                    dictionary = dictionary.parent
            return (False, None)
        # Helper: check a single dictionary (not its ancestors) for a
        # [ChromeOnly] member, recursing into member types.
        def dictionaryContainsChromeOnlyMember(dictionary):
            for member in dictionary.members:
                if member.getExtendedAttribute("ChromeOnly"):
                    return (True, member.location)
                (contains, location) = typeContainsChromeOnlyDictionaryMember(member.type)
                if contains:
                    return (True, location)
            return (False, None)
        IDLInterfaceMember.validate(self)
        if (self.getExtendedAttribute("Cached") or
            self.getExtendedAttribute("StoreInSlot")):
            if not self.affects == "Nothing":
                raise WebIDLError("Cached attributes and attributes stored in "
                                  "slots must be Constant or Pure or "
                                  "Affects=Nothing, since the getter won't always "
                                  "be called.",
                                  [self.location])
            (contains, location) = typeContainsChromeOnlyDictionaryMember(self.type)
            if contains:
                raise WebIDLError("[Cached] and [StoreInSlot] must not be used "
                                  "on an attribute whose type contains a "
                                  "[ChromeOnly] dictionary member",
                                  [self.location, location])
        if self.getExtendedAttribute("Frozen"):
            if (not self.type.isSequence() and not self.type.isDictionary() and
                not self.type.isRecord()):
                raise WebIDLError("[Frozen] is only allowed on "
                                  "sequence-valued, dictionary-valued, and "
                                  "record-valued attributes",
                                  [self.location])
        if not self.type.unroll().isExposedInAllOf(self.exposureSet):
            raise WebIDLError("Attribute returns a type that is not exposed "
                              "everywhere where the attribute is exposed",
                              [self.location])
        if self.getExtendedAttribute("CEReactions"):
            if self.readonly:
                raise WebIDLError("[CEReactions] is not allowed on "
                                  "readonly attributes",
                                  [self.location])
    def handleExtendedAttribute(self, attr):
        """Process one extended attribute, enforcing per-attribute rules.

        Large if/elif dispatch over the known attribute names; unknown
        names are errors.
        """
        identifier = attr.identifier()
        if ((identifier == "SetterThrows" or identifier == "SetterCanOOM")
            and self.readonly):
            raise WebIDLError("Readonly attributes must not be flagged as "
                              "[%s]" % identifier,
                              [self.location])
        elif (((identifier == "Throws" or identifier == "GetterThrows" or
                identifier == "CanOOM" or identifier == "GetterCanOOM") and
               self.getExtendedAttribute("StoreInSlot")) or
              (identifier == "StoreInSlot" and
               (self.getExtendedAttribute("Throws") or
                self.getExtendedAttribute("GetterThrows") or
                self.getExtendedAttribute("CanOOM") or
                self.getExtendedAttribute("GetterCanOOM")))):
            raise WebIDLError("Throwing things can't be [StoreInSlot]",
                              [attr.location])
        elif identifier == "LenientThis":
            if not attr.noArguments():
                raise WebIDLError("[LenientThis] must take no arguments",
                                  [attr.location])
            if self.isStatic():
                raise WebIDLError("[LenientThis] is only allowed on non-static "
                                  "attributes", [attr.location, self.location])
            if self.getExtendedAttribute("CrossOriginReadable"):
                raise WebIDLError("[LenientThis] is not allowed in combination "
                                  "with [CrossOriginReadable]",
                                  [attr.location, self.location])
            if self.getExtendedAttribute("CrossOriginWritable"):
                raise WebIDLError("[LenientThis] is not allowed in combination "
                                  "with [CrossOriginWritable]",
                                  [attr.location, self.location])
            self.lenientThis = True
        elif identifier == "Unforgeable":
            if self.isStatic():
                raise WebIDLError("[Unforgeable] is only allowed on non-static "
                                  "attributes", [attr.location, self.location])
            self._unforgeable = True
        elif identifier == "SameObject" and not self.readonly:
            raise WebIDLError("[SameObject] only allowed on readonly attributes",
                              [attr.location, self.location])
        elif identifier == "Constant" and not self.readonly:
            raise WebIDLError("[Constant] only allowed on readonly attributes",
                              [attr.location, self.location])
        elif identifier == "PutForwards":
            if not self.readonly:
                raise WebIDLError("[PutForwards] is only allowed on readonly "
                                  "attributes", [attr.location, self.location])
            if self.type.isPromise():
                raise WebIDLError("[PutForwards] is not allowed on "
                                  "Promise-typed attributes",
                                  [attr.location, self.location])
            if self.isStatic():
                raise WebIDLError("[PutForwards] is only allowed on non-static "
                                  "attributes", [attr.location, self.location])
            if self.getExtendedAttribute("Replaceable") is not None:
                raise WebIDLError("[PutForwards] and [Replaceable] can't both "
                                  "appear on the same attribute",
                                  [attr.location, self.location])
            if not attr.hasValue():
                raise WebIDLError("[PutForwards] takes an identifier",
                                  [attr.location, self.location])
        elif identifier == "Replaceable":
            if not attr.noArguments():
                raise WebIDLError("[Replaceable] must take no arguments",
                                  [attr.location])
            if not self.readonly:
                raise WebIDLError("[Replaceable] is only allowed on readonly "
                                  "attributes", [attr.location, self.location])
            if self.type.isPromise():
                raise WebIDLError("[Replaceable] is not allowed on "
                                  "Promise-typed attributes",
                                  [attr.location, self.location])
            if self.isStatic():
                raise WebIDLError("[Replaceable] is only allowed on non-static "
                                  "attributes", [attr.location, self.location])
            if self.getExtendedAttribute("PutForwards") is not None:
                raise WebIDLError("[PutForwards] and [Replaceable] can't both "
                                  "appear on the same attribute",
                                  [attr.location, self.location])
        elif identifier == "LenientSetter":
            if not attr.noArguments():
                raise WebIDLError("[LenientSetter] must take no arguments",
                                  [attr.location])
            if not self.readonly:
                raise WebIDLError("[LenientSetter] is only allowed on readonly "
                                  "attributes", [attr.location, self.location])
            if self.type.isPromise():
                raise WebIDLError("[LenientSetter] is not allowed on "
                                  "Promise-typed attributes",
                                  [attr.location, self.location])
            if self.isStatic():
                raise WebIDLError("[LenientSetter] is only allowed on non-static "
                                  "attributes", [attr.location, self.location])
            if self.getExtendedAttribute("PutForwards") is not None:
                raise WebIDLError("[LenientSetter] and [PutForwards] can't both "
                                  "appear on the same attribute",
                                  [attr.location, self.location])
            if self.getExtendedAttribute("Replaceable") is not None:
                raise WebIDLError("[LenientSetter] and [Replaceable] can't both "
                                  "appear on the same attribute",
                                  [attr.location, self.location])
        elif identifier == "LenientFloat":
            if self.readonly:
                raise WebIDLError("[LenientFloat] used on a readonly attribute",
                                  [attr.location, self.location])
            if not self.type.includesRestrictedFloat():
                raise WebIDLError("[LenientFloat] used on an attribute with a "
                                  "non-restricted-float type",
                                  [attr.location, self.location])
        elif identifier == "EnforceRange":
            if self.readonly:
                raise WebIDLError("[EnforceRange] used on a readonly attribute",
                                  [attr.location, self.location])
            self.enforceRange = True
        elif identifier == "Clamp":
            if self.readonly:
                raise WebIDLError("[Clamp] used on a readonly attribute",
                                  [attr.location, self.location])
            self.clamp = True
        elif identifier == "StoreInSlot":
            if self.getExtendedAttribute("Cached"):
                raise WebIDLError("[StoreInSlot] and [Cached] must not be "
                                  "specified on the same attribute",
                                  [attr.location, self.location])
        elif identifier == "Cached":
            if self.getExtendedAttribute("StoreInSlot"):
                raise WebIDLError("[Cached] and [StoreInSlot] must not be "
                                  "specified on the same attribute",
                                  [attr.location, self.location])
        elif (identifier == "CrossOriginReadable" or
              identifier == "CrossOriginWritable"):
            if not attr.noArguments() and identifier == "CrossOriginReadable":
                raise WebIDLError("[%s] must take no arguments" % identifier,
                                  [attr.location])
            if self.isStatic():
                raise WebIDLError("[%s] is only allowed on non-static "
                                  "attributes" % identifier,
                                  [attr.location, self.location])
            if self.getExtendedAttribute("LenientThis"):
                raise WebIDLError("[LenientThis] is not allowed in combination "
                                  "with [%s]" % identifier,
                                  [attr.location, self.location])
        elif identifier == "Exposed":
            convertExposedAttrToGlobalNameSet(attr, self._exposureGlobalNames)
        elif identifier == "Pure":
            if not attr.noArguments():
                raise WebIDLError("[Pure] must take no arguments",
                                  [attr.location])
            # [Pure] == depends only on DOM state, affects nothing.
            self._setDependsOn("DOMState")
            self._setAffects("Nothing")
        elif identifier == "Constant" or identifier == "SameObject":
            if not attr.noArguments():
                raise WebIDLError("[%s] must take no arguments" % identifier,
                                  [attr.location])
            # [Constant]/[SameObject] == depends on nothing, affects nothing.
            self._setDependsOn("Nothing")
            self._setAffects("Nothing")
        elif identifier == "Affects":
            if not attr.hasValue():
                raise WebIDLError("[Affects] takes an identifier",
                                  [attr.location])
            self._setAffects(attr.value())
        elif identifier == "DependsOn":
            if not attr.hasValue():
                raise WebIDLError("[DependsOn] takes an identifier",
                                  [attr.location])
            if (attr.value() != "Everything" and attr.value() != "DOMState" and
                not self.readonly):
                raise WebIDLError("[DependsOn=%s] only allowed on "
                                  "readonly attributes" % attr.value(),
                                  [attr.location, self.location])
            self._setDependsOn(attr.value())
        elif identifier == "UseCounter":
            if self.stringifier:
                raise WebIDLError("[UseCounter] must not be used on a "
                                  "stringifier attribute",
                                  [attr.location, self.location])
        elif identifier == "Unscopable":
            if not attr.noArguments():
                raise WebIDLError("[Unscopable] must take no arguments",
                                  [attr.location])
            if self.isStatic():
                raise WebIDLError("[Unscopable] is only allowed on non-static "
                                  "attributes and operations",
                                  [attr.location, self.location])
        elif identifier == "CEReactions":
            if not attr.noArguments():
                raise WebIDLError("[CEReactions] must take no arguments",
                                  [attr.location])
        elif (identifier == "Pref" or
              identifier == "Deprecated" or
              identifier == "SetterThrows" or
              identifier == "Throws" or
              identifier == "GetterThrows" or
              identifier == "SetterCanOOM" or
              identifier == "CanOOM" or
              identifier == "GetterCanOOM" or
              identifier == "ChromeOnly" or
              identifier == "Func" or
              identifier == "SecureContext" or
              identifier == "Frozen" or
              identifier == "NewObject" or
              identifier == "UnsafeInPrerendering" or
              identifier == "NeedsSubjectPrincipal" or
              identifier == "NeedsCallerType" or
              identifier == "ReturnValueNeedsContainsHack" or
              identifier == "BinaryName"):
            # Known attributes that we don't need to do anything with here
            pass
        else:
            raise WebIDLError("Unknown extended attribute %s on attribute" % identifier,
                              [attr.location])
        IDLInterfaceMember.handleExtendedAttribute(self, attr)
    def resolve(self, parentScope):
        """Resolve this attribute's type and identifier in `parentScope`."""
        assert isinstance(parentScope, IDLScope)
        self.type.resolveType(parentScope)
        IDLObjectWithIdentifier.resolve(self, parentScope)
    def addExtendedAttributes(self, attrs):
        # String-handling attributes ([TreatNullAs] etc.) are filtered first.
        attrs = self.checkForStringHandlingExtendedAttributes(attrs)
        IDLInterfaceMember.addExtendedAttributes(self, attrs)
    def hasLenientThis(self):
        return self.lenientThis
    def isMaplikeOrSetlikeAttr(self):
        """
        True if this attribute was generated from an interface with
        maplike/setlike (e.g. this is the size attribute for
        maplike/setlike)
        """
        return self.maplikeOrSetlike is not None
    def isUnforgeable(self):
        return self._unforgeable
    def _getDependentObjects(self):
        return set([self.type])
class IDLArgument(IDLObjectWithIdentifier):
    """An argument of an operation/callback, or a dictionary member.

    Dictionary members reuse this class with dictionaryMember=True, which
    changes which extended attributes are allowed.
    """
    def __init__(self, location, identifier, type, optional=False, defaultValue=None, variadic=False, dictionaryMember=False):
        IDLObjectWithIdentifier.__init__(self, location, None, identifier)
        assert isinstance(type, IDLType)
        self.type = type
        self.optional = optional
        self.defaultValue = defaultValue
        self.variadic = variadic
        self.dictionaryMember = dictionaryMember
        self._isComplete = False
        self.enforceRange = False
        self.clamp = False
        self._allowTreatNonCallableAsNull = False
        self._extendedAttrDict = {}
        # Variadic arguments are implicitly optional and cannot have defaults.
        assert not variadic or optional
        assert not variadic or not defaultValue
    def addExtendedAttributes(self, attrs):
        """Process [Clamp]/[EnforceRange]/[TreatNonCallableAsNull] etc."""
        attrs = self.checkForStringHandlingExtendedAttributes(
            attrs,
            isDictionaryMember=self.dictionaryMember,
            isOptional=self.optional)
        for attribute in attrs:
            identifier = attribute.identifier()
            if identifier == "Clamp":
                if not attribute.noArguments():
                    raise WebIDLError("[Clamp] must take no arguments",
                                      [attribute.location])
                if self.enforceRange:
                    raise WebIDLError("[EnforceRange] and [Clamp] are mutually exclusive",
                                      [self.location])
                self.clamp = True
            elif identifier == "EnforceRange":
                if not attribute.noArguments():
                    raise WebIDLError("[EnforceRange] must take no arguments",
                                      [attribute.location])
                if self.clamp:
                    raise WebIDLError("[EnforceRange] and [Clamp] are mutually exclusive",
                                      [self.location])
                self.enforceRange = True
            elif identifier == "TreatNonCallableAsNull":
                self._allowTreatNonCallableAsNull = True
            elif (self.dictionaryMember and
                  (identifier == "ChromeOnly" or identifier == "Func")):
                if not self.optional:
                    raise WebIDLError("[%s] must not be used on a required "
                                      "dictionary member" % identifier,
                                      [attribute.location])
            else:
                raise WebIDLError("Unhandled extended attribute on %s" %
                                  ("a dictionary member" if self.dictionaryMember else
                                   "an argument"),
                                  [attribute.location])
            attrlist = attribute.listValue()
            self._extendedAttrDict[identifier] = attrlist if len(attrlist) else True
    def getExtendedAttribute(self, name):
        return self._extendedAttrDict.get(name, None)
    def isComplete(self):
        return self._isComplete
    def complete(self, scope):
        """Complete the type and synthesize implicit default values."""
        if self._isComplete:
            return
        self._isComplete = True
        if not self.type.isComplete():
            type = self.type.complete(scope)
            assert not isinstance(type, IDLUnresolvedType)
            assert not isinstance(type, IDLTypedefType)
            assert not isinstance(type.name, IDLUnresolvedIdentifier)
            self.type = type
        if ((self.type.isDictionary() or
             self.type.isUnion() and self.type.unroll().hasDictionaryType()) and
            self.optional and not self.defaultValue and not self.variadic):
            # Default optional non-variadic dictionaries to null,
            # for simplicity, so the codegen doesn't have to special-case this.
            self.defaultValue = IDLNullValue(self.location)
        elif self.type.isAny():
            assert (self.defaultValue is None or
                    isinstance(self.defaultValue, IDLNullValue))
            # optional 'any' values always have a default value
            if self.optional and not self.defaultValue and not self.variadic:
                # Set the default value to undefined, for simplicity, so the
                # codegen doesn't have to special-case this.
                self.defaultValue = IDLUndefinedValue(self.location)
        # Now do the coercing thing; this needs to happen after the
        # above creation of a default value.
        if self.defaultValue:
            self.defaultValue = self.defaultValue.coerceToType(self.type,
                                                               self.location)
            assert self.defaultValue
    def allowTreatNonCallableAsNull(self):
        return self._allowTreatNonCallableAsNull
    def _getDependentObjects(self):
        deps = set([self.type])
        if self.defaultValue:
            deps.add(self.defaultValue)
        return deps
    def canHaveMissingValue(self):
        # Optional with no default means the binding must represent "absent".
        return self.optional and not self.defaultValue
class IDLCallback(IDLObjectWithScope):
    """A callback function definition (`callback Foo = RetType (args);`)."""
    def __init__(self, location, parentScope, identifier, returnType, arguments):
        assert isinstance(returnType, IDLType)
        self._returnType = returnType
        # Clone the list
        self._arguments = list(arguments)
        IDLObjectWithScope.__init__(self, location, parentScope, identifier)
        for (returnType, arguments) in self.signatures():
            for argument in arguments:
                argument.resolve(self)
        self._treatNonCallableAsNull = False
        self._treatNonObjectAsNull = False
    def isCallback(self):
        return True
    def signatures(self):
        # Callbacks have exactly one signature.
        return [(self._returnType, self._arguments)]
    def finish(self, scope):
        """Complete the return type and all argument types."""
        if not self._returnType.isComplete():
            type = self._returnType.complete(scope)
            assert not isinstance(type, IDLUnresolvedType)
            assert not isinstance(type, IDLTypedefType)
            assert not isinstance(type.name, IDLUnresolvedIdentifier)
            self._returnType = type
        for argument in self._arguments:
            if argument.type.isComplete():
                continue
            type = argument.type.complete(scope)
            assert not isinstance(type, IDLUnresolvedType)
            assert not isinstance(type, IDLTypedefType)
            assert not isinstance(type.name, IDLUnresolvedIdentifier)
            argument.type = type
    def validate(self):
        pass
    def addExtendedAttributes(self, attrs):
        """Handle [TreatNonCallableAsNull]/[TreatNonObjectAsNull]; forward the rest."""
        unhandledAttrs = []
        for attr in attrs:
            if attr.identifier() == "TreatNonCallableAsNull":
                self._treatNonCallableAsNull = True
            elif attr.identifier() == "TreatNonObjectAsNull":
                self._treatNonObjectAsNull = True
            else:
                unhandledAttrs.append(attr)
        if self._treatNonCallableAsNull and self._treatNonObjectAsNull:
            raise WebIDLError("Cannot specify both [TreatNonCallableAsNull] "
                              "and [TreatNonObjectAsNull]", [self.location])
        if len(unhandledAttrs) != 0:
            # NOTE(review): delegates to IDLType.addExtendedAttributes even
            # though IDLCallback derives from IDLObjectWithScope -- confirm
            # this dispatch is intended.
            IDLType.addExtendedAttributes(self, unhandledAttrs)
    def _getDependentObjects(self):
        return set([self._returnType] + self._arguments)
class IDLCallbackType(IDLType):
    """An IDLType that wraps an IDLCallback, so a callback can be used as the
    type of an argument, attribute or return value."""

    def __init__(self, location, callback):
        IDLType.__init__(self, location, callback.identifier.name)
        self.callback = callback

    def isCallback(self):
        return True

    def tag(self):
        return IDLType.Tags.callback

    def isDistinguishableFrom(self, other):
        # Nothing is distinguishable from a Promise.
        if other.isPromise():
            return False
        # Let the union answer for its member types.
        if other.isUnion():
            return other.isDistinguishableFrom(self)
        distinguishable = (other.isPrimitive() or
                           other.isString() or
                           other.isEnum() or
                           other.isNonCallbackInterface() or
                           other.isDate() or
                           other.isSequence())
        return distinguishable

    def _getDependentObjects(self):
        # Delegate to the wrapped callback.
        return self.callback._getDependentObjects()
class IDLMethodOverload:
    """
    One overload of a WebIDL method: a return type plus an argument list.

    This is not quite an element of the spec's "effective overload set":
    optional arguments do not produce separate IDLMethodOverloads.  When
    several methods share a name, each contributes one IDLMethodOverload to
    the single IDLMethod representing the full set of overloads.
    """
    def __init__(self, returnType, arguments, location):
        self.returnType = returnType
        # Defensive copy: later mutation of the caller's list must not be
        # visible through this overload.
        self.arguments = list(arguments)
        self.location = location

    def _getDependentObjects(self):
        dependencies = set(self.arguments)
        dependencies.add(self.returnType)
        return dependencies
class IDLMethod(IDLInterfaceMember, IDLScope):
    """A WebIDL operation.  All same-named overloads are folded into one
    IDLMethod; each (returnType, arguments) pair is an IDLMethodOverload in
    self._overloads (see addOverload)."""

    Special = enum(
        'Getter',
        'Setter',
        'Creator',
        'Deleter',
        'LegacyCaller',
        base=IDLInterfaceMember.Special
    )

    NamedOrIndexed = enum(
        'Neither',
        'Named',
        'Indexed'
    )

    def __init__(self, location, identifier, returnType, arguments,
                 static=False, getter=False, setter=False, creator=False,
                 deleter=False, specialType=NamedOrIndexed.Neither,
                 legacycaller=False, stringifier=False, jsonifier=False,
                 maplikeOrSetlikeOrIterable=None, htmlConstructor=False):
        # REVIEW: specialType is NamedOrIndexed -- wow, this is messed up.
        IDLInterfaceMember.__init__(self, location, identifier,
                                    IDLInterfaceMember.Tags.Method)

        self._hasOverloads = False

        assert isinstance(returnType, IDLType)

        # self._overloads is a list of IDLMethodOverloads
        self._overloads = [IDLMethodOverload(returnType, arguments, location)]

        assert isinstance(static, bool)
        self._static = static
        assert isinstance(getter, bool)
        self._getter = getter
        assert isinstance(setter, bool)
        self._setter = setter
        assert isinstance(creator, bool)
        self._creator = creator
        assert isinstance(deleter, bool)
        self._deleter = deleter
        assert isinstance(legacycaller, bool)
        self._legacycaller = legacycaller
        assert isinstance(stringifier, bool)
        self._stringifier = stringifier
        assert isinstance(jsonifier, bool)
        self._jsonifier = jsonifier
        assert maplikeOrSetlikeOrIterable is None or isinstance(maplikeOrSetlikeOrIterable, IDLMaplikeOrSetlikeOrIterableBase)
        self.maplikeOrSetlikeOrIterable = maplikeOrSetlikeOrIterable
        assert isinstance(htmlConstructor, bool)
        # The identifier of a HTMLConstructor must be 'constructor'.
        assert not htmlConstructor or identifier.name == "constructor"
        self._htmlConstructor = htmlConstructor
        self._specialType = specialType
        self._unforgeable = False
        # Conservative defaults, refined by [Pure]/[Affects]/[DependsOn].
        self.dependsOn = "Everything"
        self.affects = "Everything"
        self.aliases = []

        if static and identifier.name == "prototype":
            raise WebIDLError("The identifier of a static operation must not be 'prototype'",
                              [location])

        self.assertSignatureConstraints()

    def __str__(self):
        return "Method '%s'" % self.identifier

    def assertSignatureConstraints(self):
        """Sanity-check the fixed signature shapes of special operations
        (getter/setter/creator/deleter/stringifier/jsonifier)."""
        if self._getter or self._deleter:
            assert len(self._overloads) == 1
            overload = self._overloads[0]
            arguments = overload.arguments
            assert len(arguments) == 1
            # Index argument is either a name (DOMString) or an index
            # (unsigned long).
            assert (arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.domstring] or
                    arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.unsigned_long])
            assert not arguments[0].optional and not arguments[0].variadic
            assert not self._getter or not overload.returnType.isVoid()

        if self._setter or self._creator:
            assert len(self._overloads) == 1
            arguments = self._overloads[0].arguments
            assert len(arguments) == 2
            assert (arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.domstring] or
                    arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.unsigned_long])
            assert not arguments[0].optional and not arguments[0].variadic
            assert not arguments[1].optional and not arguments[1].variadic

        if self._stringifier:
            assert len(self._overloads) == 1
            overload = self._overloads[0]
            assert len(overload.arguments) == 0
            assert overload.returnType == BuiltinTypes[IDLBuiltinType.Types.domstring]

        if self._jsonifier:
            assert len(self._overloads) == 1
            overload = self._overloads[0]
            assert len(overload.arguments) == 0
            assert overload.returnType == BuiltinTypes[IDLBuiltinType.Types.object]

    def isStatic(self):
        return self._static

    def forceStatic(self):
        self._static = True

    def isGetter(self):
        return self._getter

    def isSetter(self):
        return self._setter

    def isCreator(self):
        return self._creator

    def isDeleter(self):
        return self._deleter

    def isNamed(self):
        assert (self._specialType == IDLMethod.NamedOrIndexed.Named or
                self._specialType == IDLMethod.NamedOrIndexed.Indexed)
        return self._specialType == IDLMethod.NamedOrIndexed.Named

    def isIndexed(self):
        assert (self._specialType == IDLMethod.NamedOrIndexed.Named or
                self._specialType == IDLMethod.NamedOrIndexed.Indexed)
        return self._specialType == IDLMethod.NamedOrIndexed.Indexed

    def isLegacycaller(self):
        return self._legacycaller

    def isStringifier(self):
        return self._stringifier

    def isJsonifier(self):
        return self._jsonifier

    def isMaplikeOrSetlikeOrIterableMethod(self):
        """
        True if this method was generated as part of a
        maplike/setlike/etc interface (e.g. has/get methods)
        """
        return self.maplikeOrSetlikeOrIterable is not None

    def isSpecial(self):
        return (self.isGetter() or
                self.isSetter() or
                self.isCreator() or
                self.isDeleter() or
                self.isLegacycaller() or
                self.isStringifier() or
                self.isJsonifier())

    def isHTMLConstructor(self):
        return self._htmlConstructor

    def hasOverloads(self):
        return self._hasOverloads

    def isIdentifierLess(self):
        """
        True if the method name started with __, and if the method is not a
        maplike/setlike method. Interfaces with maplike/setlike will generate
        methods starting with __ for chrome only backing object access in JS
        implemented interfaces, so while these functions use what is considered
        an non-identifier name, they actually DO have an identifier.
        """
        return (self.identifier.name[:2] == "__" and
                not self.isMaplikeOrSetlikeOrIterableMethod())

    def resolve(self, parentScope):
        """Register this method in *parentScope* and resolve the arguments of
        every signature in the method's own scope."""
        assert isinstance(parentScope, IDLScope)
        IDLObjectWithIdentifier.resolve(self, parentScope)
        IDLScope.__init__(self, self.location, parentScope, self.identifier)
        for (returnType, arguments) in self.signatures():
            for argument in arguments:
                argument.resolve(self)

    def addOverload(self, method):
        """Merge the single overload of *method* into this method's overload
        set; raises WebIDLError on incompatible attributes/flags."""
        assert len(method._overloads) == 1

        if self._extendedAttrDict != method ._extendedAttrDict:
            raise WebIDLError("Extended attributes differ on different "
                              "overloads of %s" % method.identifier,
                              [self.location, method.location])

        self._overloads.extend(method._overloads)

        self._hasOverloads = True

        if self.isStatic() != method.isStatic():
            raise WebIDLError("Overloaded identifier %s appears with different values of the 'static' attribute" % method.identifier,
                              [method.location])

        if self.isLegacycaller() != method.isLegacycaller():
            raise WebIDLError("Overloaded identifier %s appears with different values of the 'legacycaller' attribute" % method.identifier,
                              [method.location])

        # Can't overload special things!
        assert not self.isGetter()
        assert not method.isGetter()
        assert not self.isSetter()
        assert not method.isSetter()
        assert not self.isCreator()
        assert not method.isCreator()
        assert not self.isDeleter()
        assert not method.isDeleter()
        assert not self.isStringifier()
        assert not method.isStringifier()
        assert not self.isJsonifier()
        assert not method.isJsonifier()
        assert not self.isHTMLConstructor()
        assert not method.isHTMLConstructor()

        return self

    def signatures(self):
        # One (returnType, arguments) pair per overload.
        return [(overload.returnType, overload.arguments) for overload in
                self._overloads]

    def finish(self, scope):
        """Complete all types, then precompute maxArgCount/allowedArgCounts
        for overload resolution."""
        IDLInterfaceMember.finish(self, scope)

        for overload in self._overloads:
            returnType = overload.returnType
            if not returnType.isComplete():
                returnType = returnType.complete(scope)
                assert not isinstance(returnType, IDLUnresolvedType)
                assert not isinstance(returnType, IDLTypedefType)
                assert not isinstance(returnType.name, IDLUnresolvedIdentifier)
                overload.returnType = returnType

            for argument in overload.arguments:
                if not argument.isComplete():
                    argument.complete(scope)
                assert argument.type.isComplete()

        # Now compute various information that will be used by the
        # WebIDL overload resolution algorithm.
        self.maxArgCount = max(len(s[1]) for s in self.signatures())
        self.allowedArgCounts = [i for i in range(self.maxArgCount+1)
                                 if len(self.signaturesForArgCount(i)) != 0]

    def validate(self):
        """Check overload distinguishability, dictionary/variadic argument
        rules, and Promise-return consistency across overloads."""
        IDLInterfaceMember.validate(self)

        # Make sure our overloads are properly distinguishable and don't have
        # different argument types before the distinguishing args.
        for argCount in self.allowedArgCounts:
            possibleOverloads = self.overloadsForArgCount(argCount)
            if len(possibleOverloads) == 1:
                continue
            distinguishingIndex = self.distinguishingIndexForArgCount(argCount)
            for idx in range(distinguishingIndex):
                firstSigType = possibleOverloads[0].arguments[idx].type
                for overload in possibleOverloads[1:]:
                    if overload.arguments[idx].type != firstSigType:
                        raise WebIDLError(
                            "Signatures for method '%s' with %d arguments have "
                            "different types of arguments at index %d, which "
                            "is before distinguishing index %d" %
                            (self.identifier.name, argCount, idx,
                             distinguishingIndex),
                            [self.location, overload.location])

        overloadWithPromiseReturnType = None
        overloadWithoutPromiseReturnType = None
        for overload in self._overloads:
            returnType = overload.returnType
            if not returnType.unroll().isExposedInAllOf(self.exposureSet):
                raise WebIDLError("Overload returns a type that is not exposed "
                                  "everywhere where the method is exposed",
                                  [overload.location])

            variadicArgument = None

            arguments = overload.arguments
            for (idx, argument) in enumerate(arguments):
                assert argument.type.isComplete()

                if ((argument.type.isDictionary() and
                     argument.type.inner.canBeEmpty()) or
                    (argument.type.isUnion() and
                     argument.type.unroll().hasPossiblyEmptyDictionaryType())):
                    # Optional dictionaries and unions containing optional
                    # dictionaries at the end of the list or followed by
                    # optional arguments must be optional.
                    if (not argument.optional and
                        all(arg.optional for arg in arguments[idx+1:])):
                        raise WebIDLError("Dictionary argument without any "
                                          "required fields or union argument "
                                          "containing such dictionary not "
                                          "followed by a required argument "
                                          "must be optional",
                                          [argument.location])

                    # An argument cannot be a Nullable Dictionary
                    if argument.type.nullable():
                        raise WebIDLError("An argument cannot be a nullable "
                                          "dictionary or nullable union "
                                          "containing a dictionary",
                                          [argument.location])

                # Only the last argument can be variadic
                if variadicArgument:
                    raise WebIDLError("Variadic argument is not last argument",
                                      [variadicArgument.location])
                if argument.variadic:
                    variadicArgument = argument

            if returnType.isPromise():
                overloadWithPromiseReturnType = overload
            else:
                overloadWithoutPromiseReturnType = overload

        # Make sure either all our overloads return Promises or none do
        if overloadWithPromiseReturnType and overloadWithoutPromiseReturnType:
            raise WebIDLError("We have overloads with both Promise and "
                              "non-Promise return types",
                              [overloadWithPromiseReturnType.location,
                               overloadWithoutPromiseReturnType.location])

        if overloadWithPromiseReturnType and self._legacycaller:
            raise WebIDLError("May not have a Promise return type for a "
                              "legacycaller.",
                              [overloadWithPromiseReturnType.location])

        if self.getExtendedAttribute("StaticClassOverride") and not \
           (self.identifier.scope.isJSImplemented() and self.isStatic()):
            raise WebIDLError("StaticClassOverride can be applied to static"
                              " methods on JS-implemented classes only.",
                              [self.location])

    def overloadsForArgCount(self, argc):
        # An overload is callable with argc arguments when it has exactly
        # argc, or more but all the extras are optional, or fewer but its
        # last argument is variadic.
        return [overload for overload in self._overloads if
                len(overload.arguments) == argc or
                (len(overload.arguments) > argc and
                 all(arg.optional for arg in overload.arguments[argc:])) or
                (len(overload.arguments) < argc and
                 len(overload.arguments) > 0 and
                 overload.arguments[-1].variadic)]

    def signaturesForArgCount(self, argc):
        return [(overload.returnType, overload.arguments) for overload
                in self.overloadsForArgCount(argc)]

    def locationsForArgCount(self, argc):
        return [overload.location for overload in self.overloadsForArgCount(argc)]

    def distinguishingIndexForArgCount(self, argc):
        """Return the lowest argument index at which all signatures callable
        with argc arguments are pairwise distinguishable; raises WebIDLError
        if no such index exists."""
        def isValidDistinguishingIndex(idx, signatures):
            for (firstSigIndex, (firstRetval, firstArgs)) in enumerate(signatures[:-1]):
                for (secondRetval, secondArgs) in signatures[firstSigIndex+1:]:
                    if idx < len(firstArgs):
                        firstType = firstArgs[idx].type
                    else:
                        assert(firstArgs[-1].variadic)
                        firstType = firstArgs[-1].type
                    if idx < len(secondArgs):
                        secondType = secondArgs[idx].type
                    else:
                        assert(secondArgs[-1].variadic)
                        secondType = secondArgs[-1].type
                    if not firstType.isDistinguishableFrom(secondType):
                        return False
            return True
        signatures = self.signaturesForArgCount(argc)
        for idx in range(argc):
            if isValidDistinguishingIndex(idx, signatures):
                return idx
        # No valid distinguishing index.  Time to throw
        locations = self.locationsForArgCount(argc)
        raise WebIDLError("Signatures with %d arguments for method '%s' are not "
                          "distinguishable" % (argc, self.identifier.name),
                          locations)

    def handleExtendedAttribute(self, attr):
        """Process one extended attribute; rejects attributes that are not
        valid on operations, records the rest."""
        identifier = attr.identifier()
        if (identifier == "GetterThrows" or
            identifier == "SetterThrows" or
            identifier == "GetterCanOOM" or
            identifier == "SetterCanOOM"):
            raise WebIDLError("Methods must not be flagged as "
                              "[%s]" % identifier,
                              [attr.location, self.location])
        elif identifier == "Unforgeable":
            if self.isStatic():
                raise WebIDLError("[Unforgeable] is only allowed on non-static "
                                  "methods", [attr.location, self.location])
            self._unforgeable = True
        elif identifier == "SameObject":
            raise WebIDLError("Methods must not be flagged as [SameObject]",
                              [attr.location, self.location])
        elif identifier == "Constant":
            raise WebIDLError("Methods must not be flagged as [Constant]",
                              [attr.location, self.location])
        elif identifier == "PutForwards":
            raise WebIDLError("Only attributes support [PutForwards]",
                              [attr.location, self.location])
        elif identifier == "LenientSetter":
            raise WebIDLError("Only attributes support [LenientSetter]",
                              [attr.location, self.location])
        elif identifier == "LenientFloat":
            # This is called before we've done overload resolution
            assert len(self.signatures()) == 1
            sig = self.signatures()[0]
            if not sig[0].isVoid():
                raise WebIDLError("[LenientFloat] used on a non-void method",
                                  [attr.location, self.location])
            if not any(arg.type.includesRestrictedFloat() for arg in sig[1]):
                raise WebIDLError("[LenientFloat] used on an operation with no "
                                  "restricted float type arguments",
                                  [attr.location, self.location])
        elif identifier == "Exposed":
            convertExposedAttrToGlobalNameSet(attr, self._exposureGlobalNames)
        elif (identifier == "CrossOriginCallable" or
              identifier == "WebGLHandlesContextLoss"):
            # Known no-argument attributes.
            if not attr.noArguments():
                raise WebIDLError("[%s] must take no arguments" % identifier,
                                  [attr.location])
        elif identifier == "Pure":
            if not attr.noArguments():
                raise WebIDLError("[Pure] must take no arguments",
                                  [attr.location])
            # [Pure] == reads DOM state only, writes nothing.
            self._setDependsOn("DOMState")
            self._setAffects("Nothing")
        elif identifier == "Affects":
            if not attr.hasValue():
                raise WebIDLError("[Affects] takes an identifier",
                                  [attr.location])
            self._setAffects(attr.value())
        elif identifier == "DependsOn":
            if not attr.hasValue():
                raise WebIDLError("[DependsOn] takes an identifier",
                                  [attr.location])
            self._setDependsOn(attr.value())
        elif identifier == "Alias":
            if not attr.hasValue():
                raise WebIDLError("[Alias] takes an identifier or string",
                                  [attr.location])
            self._addAlias(attr.value())
        elif identifier == "UseCounter":
            if self.isSpecial():
                raise WebIDLError("[UseCounter] must not be used on a special "
                                  "operation",
                                  [attr.location, self.location])
        elif identifier == "Unscopable":
            if not attr.noArguments():
                raise WebIDLError("[Unscopable] must take no arguments",
                                  [attr.location])
            if self.isStatic():
                raise WebIDLError("[Unscopable] is only allowed on non-static "
                                  "attributes and operations",
                                  [attr.location, self.location])
        elif identifier == "CEReactions":
            if not attr.noArguments():
                raise WebIDLError("[CEReactions] must take no arguments",
                                  [attr.location])

            if self.isSpecial() and not self.isSetter() and not self.isDeleter():
                raise WebIDLError("[CEReactions] is only allowed on operation, "
                                  "attribute, setter, and deleter",
                                  [attr.location, self.location])
        elif (identifier == "Throws" or
              identifier == "CanOOM" or
              identifier == "NewObject" or
              identifier == "ChromeOnly" or
              identifier == "UnsafeInPrerendering" or
              identifier == "Pref" or
              identifier == "Deprecated" or
              identifier == "Func" or
              identifier == "SecureContext" or
              identifier == "BinaryName" or
              identifier == "NeedsSubjectPrincipal" or
              identifier == "NeedsCallerType" or
              identifier == "StaticClassOverride"):
            # Known attributes that we don't need to do anything with here
            pass
        else:
            raise WebIDLError("Unknown extended attribute %s on method" % identifier,
                              [attr.location])
        IDLInterfaceMember.handleExtendedAttribute(self, attr)

    def returnsPromise(self):
        # validate() guarantees all overloads agree on Promise-ness, so
        # checking the first is enough.
        return self._overloads[0].returnType.isPromise()

    def isUnforgeable(self):
        return self._unforgeable

    def _getDependentObjects(self):
        deps = set()
        for overload in self._overloads:
            deps.update(overload._getDependentObjects())
        return deps
class IDLImplementsStatement(IDLObject):
    """Represents a WebIDL 'A implements B;' statement."""

    def __init__(self, location, implementor, implementee):
        IDLObject.__init__(self, location)
        self.implementor = implementor
        self.implementee = implementee
        self._finished = False

    def finish(self, scope):
        """Resolve both identifier placeholders and wire the implementee
        into the implementor.  Idempotent."""
        if self._finished:
            return
        assert(isinstance(self.implementor, IDLIdentifierPlaceholder))
        assert(isinstance(self.implementee, IDLIdentifierPlaceholder))
        implementor = self.implementor.finish(scope)
        implementee = self.implementee.finish(scope)
        # NOTE: we depend on not setting self.implementor and
        # self.implementee here to keep track of the original
        # locations.
        def checkSide(resolved, placeholder, side):
            # Both sides must be non-callback interfaces.
            if not isinstance(resolved, IDLInterface):
                raise WebIDLError("%s side of 'implements' is not an "
                                  "interface" % side,
                                  [placeholder.location])
            if resolved.isCallback():
                raise WebIDLError("%s side of 'implements' is a callback "
                                  "interface" % side,
                                  [placeholder.location])
        checkSide(implementor, self.implementor, "Left-hand")
        checkSide(implementee, self.implementee, "Right-hand")
        implementor.addImplementedInterface(implementee)
        self.implementor = implementor
        self.implementee = implementee

    def validate(self):
        pass

    def addExtendedAttributes(self, attrs):
        # Implements statements admit no extended attributes at all.
        if attrs:
            raise WebIDLError("There are no extended attributes that are "
                              "allowed on implements statements",
                              [attrs[0].location, self.location])
class IDLExtendedAttribute(IDLObject):
    """
    A class to represent IDL extended attributes so we can give them locations
    """
    def __init__(self, location, tuple):
        IDLObject.__init__(self, location)
        self._tuple = tuple

    def identifier(self):
        # The attribute name is always the first tuple element.
        return self._tuple[0]

    def noArguments(self):
        return len(self._tuple) == 1

    def hasValue(self):
        contents = self._tuple
        return len(contents) >= 2 and isinstance(contents[1], str)

    def value(self):
        assert(self.hasValue())
        return self._tuple[1]

    def hasArgs(self):
        # Either (name, args) or (name, value, args).
        contents = self._tuple
        if len(contents) == 3:
            return True
        return len(contents) == 2 and isinstance(contents[1], list)

    def args(self):
        assert(self.hasArgs())
        # Our args are our last element
        return self._tuple[-1]

    def listValue(self):
        """
        Backdoor for storing random data in _extendedAttrDict
        """
        return list(self._tuple)[1:]
# Parser
class Tokenizer(object):
    """PLY lexer for WebIDL.

    NOTE: for every t_* rule below the *docstring* is the token's regular
    expression (PLY convention) and must not be edited as documentation.
    Rule order matters: function-defined tokens match in definition order.
    """

    tokens = [
        "INTEGER",
        "FLOATLITERAL",
        "IDENTIFIER",
        "STRING",
        "WHITESPACE",
        "OTHER"
    ]

    def t_FLOATLITERAL(self, t):
        r'(-?(([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)([Ee][+-]?[0-9]+)?|[0-9]+[Ee][+-]?[0-9]+|Infinity))|NaN'
        t.value = float(t.value)
        return t

    def t_INTEGER(self, t):
        r'-?(0([0-7]+|[Xx][0-9A-Fa-f]+)?|[1-9][0-9]*)'
        try:
            # Can't use int(), because that doesn't handle octal properly.
            t.value = parseInt(t.value)
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            # are not swallowed; any parse failure still becomes a
            # WebIDLError as before.
            raise WebIDLError("Invalid integer literal",
                              [Location(lexer=self.lexer,
                                        lineno=self.lexer.lineno,
                                        lexpos=self.lexer.lexpos,
                                        filename=self._filename)])
        return t

    def t_IDENTIFIER(self, t):
        r'[A-Z_a-z][0-9A-Z_a-z-]*'
        # Keywords take precedence over plain identifiers.
        t.type = self.keywords.get(t.value, 'IDENTIFIER')
        return t

    def t_STRING(self, t):
        r'"[^"]*"'
        # Strip the surrounding quotes.
        t.value = t.value[1:-1]
        return t

    def t_WHITESPACE(self, t):
        r'[\t\n\r ]+|[\t\n\r ]*((//[^\n]*|/\*.*?\*/)[\t\n\r ]*)+'
        # Whitespace and comments are discarded (no token returned).
        pass

    def t_ELLIPSIS(self, t):
        r'\.\.\.'
        t.type = self.keywords.get(t.value)
        return t

    def t_OTHER(self, t):
        r'[^\t\n\r 0-9A-Z_a-z]'
        t.type = self.keywords.get(t.value, 'OTHER')
        return t

    keywords = {
        "module": "MODULE",
        "interface": "INTERFACE",
        "partial": "PARTIAL",
        "dictionary": "DICTIONARY",
        "exception": "EXCEPTION",
        "enum": "ENUM",
        "callback": "CALLBACK",
        "typedef": "TYPEDEF",
        "implements": "IMPLEMENTS",
        "const": "CONST",
        "null": "NULL",
        "true": "TRUE",
        "false": "FALSE",
        "serializer": "SERIALIZER",
        "stringifier": "STRINGIFIER",
        "jsonifier": "JSONIFIER",
        "unrestricted": "UNRESTRICTED",
        "attribute": "ATTRIBUTE",
        "readonly": "READONLY",
        "inherit": "INHERIT",
        "static": "STATIC",
        "getter": "GETTER",
        "setter": "SETTER",
        "creator": "CREATOR",
        "deleter": "DELETER",
        "legacycaller": "LEGACYCALLER",
        "optional": "OPTIONAL",
        "...": "ELLIPSIS",
        "::": "SCOPE",
        "Date": "DATE",
        "DOMString": "DOMSTRING",
        "ByteString": "BYTESTRING",
        "USVString": "USVSTRING",
        "any": "ANY",
        "boolean": "BOOLEAN",
        "byte": "BYTE",
        "double": "DOUBLE",
        "float": "FLOAT",
        "long": "LONG",
        "object": "OBJECT",
        "octet": "OCTET",
        "Promise": "PROMISE",
        "required": "REQUIRED",
        "sequence": "SEQUENCE",
        "record": "RECORD",
        "short": "SHORT",
        "unsigned": "UNSIGNED",
        "void": "VOID",
        ":": "COLON",
        ";": "SEMICOLON",
        "{": "LBRACE",
        "}": "RBRACE",
        "(": "LPAREN",
        ")": "RPAREN",
        "[": "LBRACKET",
        "]": "RBRACKET",
        "?": "QUESTIONMARK",
        ",": "COMMA",
        "=": "EQUALS",
        "<": "LT",
        ">": "GT",
        "ArrayBuffer": "ARRAYBUFFER",
        "SharedArrayBuffer": "SHAREDARRAYBUFFER",
        "or": "OR",
        "maplike": "MAPLIKE",
        "setlike": "SETLIKE",
        "iterable": "ITERABLE",
        "namespace": "NAMESPACE"
    }

    tokens.extend(keywords.values())

    def t_error(self, t):
        raise WebIDLError("Unrecognized Input",
                          [Location(lexer=self.lexer,
                                    lineno=self.lexer.lineno,
                                    lexpos=self.lexer.lexpos,
                                    # Bug fix: the attribute is spelled
                                    # self._filename everywhere else (see
                                    # t_INTEGER and Parser.getLocation);
                                    # "self.filename" raised AttributeError
                                    # instead of reporting the lex error.
                                    filename=self._filename)])

    def __init__(self, outputdir, lexer=None):
        # Reuse a caller-supplied lexer (e.g. when a Parser subclasses us),
        # otherwise build one from this object's t_* rules.
        if lexer:
            self.lexer = lexer
        else:
            self.lexer = lex.lex(object=self,
                                 outputdir=outputdir,
                                 lextab='webidllex',
                                 reflags=re.DOTALL)
class SqueakyCleanLogger(object):
    """PLY logger that records every non-whitelisted warning as an error, so
    grammar problems fail loudly via reportGrammarErrors()."""

    errorWhitelist = [
        # Web IDL defines the WHITESPACE token, but doesn't actually
        # use it ... so far.
        "Token 'WHITESPACE' defined, but not used",
        # And that means we have an unused token
        "There is 1 unused token",
        # Web IDL defines a OtherOrComma rule that's only used in
        # ExtendedAttributeInner, which we don't use yet.
        "Rule 'OtherOrComma' defined, but not used",
        # And an unused rule
        "There is 1 unused rule",
        # And the OtherOrComma grammar symbol is unreachable.
        "Symbol 'OtherOrComma' is unreachable",
        # Which means the Other symbol is unreachable.
        "Symbol 'Other' is unreachable",
    ]

    def __init__(self):
        self.errors = []

    def debug(self, msg, *args, **kwargs):
        # Debug/info output is deliberately discarded.
        pass
    info = debug

    def warning(self, msg, *args, **kwargs):
        # PLY's "unused rule" messages carry a filename and line number;
        # strip those off so the whitelist doesn't have to hardcode them.
        unusedRuleFormats = ("%s:%d: Rule %r defined, but not used",
                             "%s:%d: Rule '%s' defined, but not used")
        if msg in unusedRuleFormats:
            whitelistmsg = "Rule %r defined, but not used"
            whitelistargs = args[2:]
        else:
            whitelistmsg = msg
            whitelistargs = args
        if (whitelistmsg % whitelistargs) not in SqueakyCleanLogger.errorWhitelist:
            self.errors.append(msg % args)
    error = warning

    def reportGrammarErrors(self):
        # Raise one combined WebIDLError if anything was recorded.
        if self.errors:
            raise WebIDLError("\n".join(self.errors), [])
class Parser(Tokenizer):
def getLocation(self, p, i):
return Location(self.lexer, p.lineno(i), p.lexpos(i), self._filename)
def globalScope(self):
return self._globalScope
# The p_Foo functions here must match the WebIDL spec's grammar.
# It's acceptable to split things at '|' boundaries.
def p_Definitions(self, p):
"""
Definitions : ExtendedAttributeList Definition Definitions
"""
if p[2]:
p[0] = [p[2]]
p[2].addExtendedAttributes(p[1])
else:
assert not p[1]
p[0] = []
p[0].extend(p[3])
def p_DefinitionsEmpty(self, p):
"""
Definitions :
"""
p[0] = []
def p_Definition(self, p):
"""
Definition : CallbackOrInterface
| Namespace
| Partial
| Dictionary
| Exception
| Enum
| Typedef
| ImplementsStatement
"""
p[0] = p[1]
assert p[1] # We might not have implemented something ...
def p_CallbackOrInterfaceCallback(self, p):
"""
CallbackOrInterface : CALLBACK CallbackRestOrInterface
"""
if p[2].isInterface():
assert isinstance(p[2], IDLInterface)
p[2].setCallback(True)
p[0] = p[2]
def p_CallbackOrInterfaceInterface(self, p):
"""
CallbackOrInterface : Interface
"""
p[0] = p[1]
def p_CallbackRestOrInterface(self, p):
"""
CallbackRestOrInterface : CallbackRest
| Interface
"""
assert p[1]
p[0] = p[1]
def handleNonPartialObject(self, location, identifier, constructor,
constructorArgs, nonPartialArgs):
"""
This handles non-partial objects (interfaces and namespaces) by
checking for an existing partial object, and promoting it to
non-partial as needed. The return value is the non-partial object.
constructorArgs are all the args for the constructor except the last
one: isKnownNonPartial.
nonPartialArgs are the args for the setNonPartial call.
"""
# The name of the class starts with "IDL", so strip that off.
# Also, starts with a capital letter after that, so nix that
# as well.
prettyname = constructor.__name__[3:].lower()
try:
existingObj = self.globalScope()._lookupIdentifier(identifier)
if existingObj:
if not isinstance(existingObj, constructor):
raise WebIDLError("%s has the same name as "
"non-%s object" %
(prettyname.capitalize(), prettyname),
[location, existingObj.location])
existingObj.setNonPartial(*nonPartialArgs)
return existingObj
except Exception, ex:
if isinstance(ex, WebIDLError):
raise ex
pass
# True for isKnownNonPartial
return constructor(*(constructorArgs + [True]))
    def p_Interface(self, p):
        """
        Interface : INTERFACE IDENTIFIER Inheritance LBRACE InterfaceMembers RBRACE SEMICOLON
        """
        # NOTE: the docstring above is the PLY grammar rule; do not edit it.
        location = self.getLocation(p, 1)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
        members = p[5]
        parent = p[3]  # IDLIdentifierPlaceholder or None (see p_Inheritance)

        # Promote any previously-seen partial interface with this name to
        # non-partial, or construct a fresh IDLInterface.
        p[0] = self.handleNonPartialObject(
            location, identifier, IDLInterface,
            [location, self.globalScope(), identifier, parent, members],
            [location, parent, members])
def p_InterfaceForwardDecl(self, p):
"""
Interface : INTERFACE IDENTIFIER SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
try:
if self.globalScope()._lookupIdentifier(identifier):
p[0] = self.globalScope()._lookupIdentifier(identifier)
if not isinstance(p[0], IDLExternalInterface):
raise WebIDLError("Name collision between external "
"interface declaration for identifier "
"%s and %s" % (identifier.name, p[0]),
[location, p[0].location])
return
except Exception, ex:
if isinstance(ex, WebIDLError):
raise ex
pass
p[0] = IDLExternalInterface(location, self.globalScope(), identifier)
    def p_Namespace(self, p):
        """
        Namespace : NAMESPACE IDENTIFIER LBRACE InterfaceMembers RBRACE SEMICOLON
        """
        # NOTE: the docstring above is the PLY grammar rule; do not edit it.
        location = self.getLocation(p, 1)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
        members = p[4]

        # Namespaces have no parent; the None in the setNonPartial args
        # takes the place an interface's parent would occupy.
        p[0] = self.handleNonPartialObject(
            location, identifier, IDLNamespace,
            [location, self.globalScope(), identifier, members],
            [location, None, members])
def p_Partial(self, p):
"""
Partial : PARTIAL PartialDefinition
"""
p[0] = p[2]
def p_PartialDefinition(self, p):
"""
PartialDefinition : PartialInterface
| PartialNamespace
"""
p[0] = p[1]
def handlePartialObject(self, location, identifier, nonPartialConstructor,
nonPartialConstructorArgs,
partialConstructorArgs):
"""
This handles partial objects (interfaces and namespaces) by checking for
an existing non-partial object, and adding ourselves to it as needed.
The return value is our partial object. For now we just use
IDLPartialInterfaceOrNamespace for partial objects.
nonPartialConstructorArgs are all the args for the non-partial
constructor except the last two: members and isKnownNonPartial.
partialConstructorArgs are the arguments for the
IDLPartialInterfaceOrNamespace constructor, except the last one (the
non-partial object).
"""
# The name of the class starts with "IDL", so strip that off.
# Also, starts with a capital letter after that, so nix that
# as well.
prettyname = nonPartialConstructor.__name__[3:].lower()
nonPartialObject = None
try:
nonPartialObject = self.globalScope()._lookupIdentifier(identifier)
if nonPartialObject:
if not isinstance(nonPartialObject, nonPartialConstructor):
raise WebIDLError("Partial %s has the same name as "
"non-%s object" %
(prettyname, prettyname),
[location, nonPartialObject.location])
except Exception, ex:
if isinstance(ex, WebIDLError):
raise ex
pass
if not nonPartialObject:
nonPartialObject = nonPartialConstructor(
# No members, False for isKnownNonPartial
*(nonPartialConstructorArgs + [[], False]))
partialInterface = IDLPartialInterfaceOrNamespace(
*(partialConstructorArgs + [nonPartialObject]))
return partialInterface
    def p_PartialInterface(self, p):
        """
        PartialInterface : INTERFACE IDENTIFIER LBRACE InterfaceMembers RBRACE SEMICOLON
        """
        # NOTE: the docstring above is the PLY grammar rule; do not edit it.
        location = self.getLocation(p, 1)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
        members = p[4]

        # The None is the parent slot of the would-be non-partial IDLInterface.
        p[0] = self.handlePartialObject(
            location, identifier, IDLInterface,
            [location, self.globalScope(), identifier, None],
            [location, identifier, members])
    def p_PartialNamespace(self, p):
        """
        PartialNamespace : NAMESPACE IDENTIFIER LBRACE InterfaceMembers RBRACE SEMICOLON
        """
        # NOTE: the docstring above is the PLY grammar rule; do not edit it.
        location = self.getLocation(p, 1)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
        members = p[4]

        p[0] = self.handlePartialObject(
            location, identifier, IDLNamespace,
            [location, self.globalScope(), identifier],
            [location, identifier, members])
    def p_Inheritance(self, p):
        """
        Inheritance : COLON ScopedName
        """
        # The parent is only known by name at parse time; the placeholder is
        # resolved to a real object later, during finish().
        p[0] = IDLIdentifierPlaceholder(self.getLocation(p, 2), p[2])
    def p_InheritanceEmpty(self, p):
        """
        Inheritance :
        """
        # No parent given; p[0] is left as None.
        pass
def p_InterfaceMembers(self, p):
"""
InterfaceMembers : ExtendedAttributeList InterfaceMember InterfaceMembers
"""
p[0] = [p[2]] if p[2] else []
assert not p[1] or p[2]
p[2].addExtendedAttributes(p[1])
p[0].extend(p[3])
def p_InterfaceMembersEmpty(self, p):
"""
InterfaceMembers :
"""
p[0] = []
def p_InterfaceMember(self, p):
"""
InterfaceMember : Const
| AttributeOrOperationOrMaplikeOrSetlikeOrIterable
"""
p[0] = p[1]
def p_Dictionary(self, p):
"""
Dictionary : DICTIONARY IDENTIFIER Inheritance LBRACE DictionaryMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
members = p[5]
p[0] = IDLDictionary(location, self.globalScope(), identifier, p[3], members)
def p_DictionaryMembers(self, p):
"""
DictionaryMembers : ExtendedAttributeList DictionaryMember DictionaryMembers
|
"""
if len(p) == 1:
# We're at the end of the list
p[0] = []
return
# Add our extended attributes
p[2].addExtendedAttributes(p[1])
p[0] = [p[2]]
p[0].extend(p[3])
def p_DictionaryMember(self, p):
    """
    DictionaryMember : Required Type IDENTIFIER Default SEMICOLON
    """
    # These quack a lot like optional arguments, so just treat them that way.
    t = p[2]
    assert isinstance(t, IDLType)
    identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3])
    defaultValue = p[4]
    # A member is optional exactly when the "required" keyword is absent.
    optional = not p[1]

    # "required" and a default value are mutually exclusive.
    if not optional and defaultValue:
        raise WebIDLError("Required dictionary members can't have a default value.",
                          [self.getLocation(p, 4)])

    p[0] = IDLArgument(self.getLocation(p, 3), identifier, t,
                       optional=optional,
                       defaultValue=defaultValue, variadic=False,
                       dictionaryMember=True)
def p_Default(self, p):
    """
    Default : EQUALS DefaultValue
            |
    """
    # A default exists exactly when the EQUALS branch matched; the parsed
    # DefaultValue then sits in slot 2.  Otherwise yield None.
    p[0] = p[2] if len(p) > 1 else None
def p_DefaultValue(self, p):
    """
    DefaultValue : ConstValue
                 | LBRACKET RBRACKET
    """
    # Three slots means the "[ ]" production matched: an empty sequence.
    if len(p) == 3:
        p[0] = IDLEmptySequenceValue(self.getLocation(p, 1))
    else:
        assert len(p) == 2
        p[0] = p[1]
def p_Exception(self, p):
"""
Exception : EXCEPTION IDENTIFIER Inheritance LBRACE ExceptionMembers RBRACE SEMICOLON
"""
pass
def p_Enum(self, p):
    """
    Enum : ENUM IDENTIFIER LBRACE EnumValueList RBRACE SEMICOLON
    """
    location = self.getLocation(p, 1)
    identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])

    values = p[4]
    # EnumValueList requires at least one STRING, so this can't be empty.
    assert values

    p[0] = IDLEnum(location, self.globalScope(), identifier, values)
def p_EnumValueList(self, p):
    """
    EnumValueList : STRING EnumValueListComma
    """
    # The first enum string followed by whatever the comma tail collected.
    p[0] = [p[1]] + p[2]
def p_EnumValueListComma(self, p):
"""
EnumValueListComma : COMMA EnumValueListString
"""
p[0] = p[2]
def p_EnumValueListCommaEmpty(self, p):
"""
EnumValueListComma :
"""
p[0] = []
def p_EnumValueListString(self, p):
"""
EnumValueListString : STRING EnumValueListComma
"""
p[0] = [p[1]]
p[0].extend(p[2])
def p_EnumValueListStringEmpty(self, p):
"""
EnumValueListString :
"""
p[0] = []
def p_CallbackRest(self, p):
"""
CallbackRest : IDENTIFIER EQUALS ReturnType LPAREN ArgumentList RPAREN SEMICOLON
"""
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
p[0] = IDLCallback(self.getLocation(p, 1), self.globalScope(),
identifier, p[3], p[5])
def p_ExceptionMembers(self, p):
"""
ExceptionMembers : ExtendedAttributeList ExceptionMember ExceptionMembers
|
"""
pass
def p_Typedef(self, p):
"""
Typedef : TYPEDEF Type IDENTIFIER SEMICOLON
"""
typedef = IDLTypedef(self.getLocation(p, 1), self.globalScope(),
p[2], p[3])
p[0] = typedef
def p_ImplementsStatement(self, p):
"""
ImplementsStatement : ScopedName IMPLEMENTS ScopedName SEMICOLON
"""
assert(p[2] == "implements")
implementor = IDLIdentifierPlaceholder(self.getLocation(p, 1), p[1])
implementee = IDLIdentifierPlaceholder(self.getLocation(p, 3), p[3])
p[0] = IDLImplementsStatement(self.getLocation(p, 1), implementor,
implementee)
def p_Const(self, p):
"""
Const : CONST ConstType IDENTIFIER EQUALS ConstValue SEMICOLON
"""
location = self.getLocation(p, 1)
type = p[2]
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3])
value = p[5]
p[0] = IDLConst(location, identifier, type, value)
def p_ConstValueBoolean(self, p):
"""
ConstValue : BooleanLiteral
"""
location = self.getLocation(p, 1)
booleanType = BuiltinTypes[IDLBuiltinType.Types.boolean]
p[0] = IDLValue(location, booleanType, p[1])
def p_ConstValueInteger(self, p):
"""
ConstValue : INTEGER
"""
location = self.getLocation(p, 1)
# We don't know ahead of time what type the integer literal is.
# Determine the smallest type it could possibly fit in and use that.
integerType = matchIntegerValueToType(p[1])
if integerType is None:
raise WebIDLError("Integer literal out of range", [location])
p[0] = IDLValue(location, integerType, p[1])
def p_ConstValueFloat(self, p):
"""
ConstValue : FLOATLITERAL
"""
location = self.getLocation(p, 1)
p[0] = IDLValue(location, BuiltinTypes[IDLBuiltinType.Types.unrestricted_float], p[1])
def p_ConstValueString(self, p):
"""
ConstValue : STRING
"""
location = self.getLocation(p, 1)
stringType = BuiltinTypes[IDLBuiltinType.Types.domstring]
p[0] = IDLValue(location, stringType, p[1])
def p_ConstValueNull(self, p):
"""
ConstValue : NULL
"""
p[0] = IDLNullValue(self.getLocation(p, 1))
def p_BooleanLiteralTrue(self, p):
"""
BooleanLiteral : TRUE
"""
p[0] = True
def p_BooleanLiteralFalse(self, p):
"""
BooleanLiteral : FALSE
"""
p[0] = False
def p_AttributeOrOperationOrMaplikeOrSetlikeOrIterable(self, p):
"""
AttributeOrOperationOrMaplikeOrSetlikeOrIterable : Attribute
| Maplike
| Setlike
| Iterable
| Operation
"""
p[0] = p[1]
def p_Iterable(self, p):
"""
Iterable : ITERABLE LT Type GT SEMICOLON
| ITERABLE LT Type COMMA Type GT SEMICOLON
"""
location = self.getLocation(p, 2)
identifier = IDLUnresolvedIdentifier(location, "__iterable",
allowDoubleUnderscore=True)
if (len(p) > 6):
keyType = p[3]
valueType = p[5]
else:
keyType = None
valueType = p[3]
p[0] = IDLIterable(location, identifier, keyType, valueType, self.globalScope())
def p_Setlike(self, p):
"""
Setlike : ReadOnly SETLIKE LT Type GT SEMICOLON
"""
readonly = p[1]
maplikeOrSetlikeType = p[2]
location = self.getLocation(p, 2)
identifier = IDLUnresolvedIdentifier(location, "__setlike",
allowDoubleUnderscore=True)
keyType = p[4]
valueType = keyType
p[0] = IDLMaplikeOrSetlike(location, identifier, maplikeOrSetlikeType,
readonly, keyType, valueType)
def p_Maplike(self, p):
"""
Maplike : ReadOnly MAPLIKE LT Type COMMA Type GT SEMICOLON
"""
readonly = p[1]
maplikeOrSetlikeType = p[2]
location = self.getLocation(p, 2)
identifier = IDLUnresolvedIdentifier(location, "__maplike",
allowDoubleUnderscore=True)
keyType = p[4]
valueType = p[6]
p[0] = IDLMaplikeOrSetlike(location, identifier, maplikeOrSetlikeType,
readonly, keyType, valueType)
def p_AttributeWithQualifier(self, p):
"""
Attribute : Qualifier AttributeRest
"""
static = IDLInterfaceMember.Special.Static in p[1]
stringifier = IDLInterfaceMember.Special.Stringifier in p[1]
(location, identifier, type, readonly) = p[2]
p[0] = IDLAttribute(location, identifier, type, readonly,
static=static, stringifier=stringifier)
def p_AttributeInherited(self, p):
"""
Attribute : INHERIT AttributeRest
"""
(location, identifier, type, readonly) = p[2]
p[0] = IDLAttribute(location, identifier, type, readonly, inherit=True)
def p_Attribute(self, p):
"""
Attribute : AttributeRest
"""
(location, identifier, type, readonly) = p[1]
p[0] = IDLAttribute(location, identifier, type, readonly, inherit=False)
def p_AttributeRest(self, p):
"""
AttributeRest : ReadOnly ATTRIBUTE Type AttributeName SEMICOLON
"""
location = self.getLocation(p, 2)
readonly = p[1]
t = p[3]
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 4), p[4])
p[0] = (location, identifier, t, readonly)
def p_ReadOnly(self, p):
"""
ReadOnly : READONLY
"""
p[0] = True
def p_ReadOnlyEmpty(self, p):
"""
ReadOnly :
"""
p[0] = False
def p_Operation(self, p):
    """
    Operation : Qualifiers OperationRest
    """
    qualifiers = p[1]

    # Disallow duplicates in the qualifier set
    if not len(set(qualifiers)) == len(qualifiers):
        raise WebIDLError("Duplicate qualifiers are not allowed",
                          [self.getLocation(p, 1)])

    static = IDLInterfaceMember.Special.Static in p[1]
    # If static is there that's all that's allowed.  This is disallowed
    # by the parser, so we can assert here.
    assert not static or len(qualifiers) == 1

    stringifier = IDLInterfaceMember.Special.Stringifier in p[1]
    # If stringifier is there that's all that's allowed.  This is disallowed
    # by the parser, so we can assert here.
    assert not stringifier or len(qualifiers) == 1

    # Decode the Special flags into individual booleans.
    getter = True if IDLMethod.Special.Getter in p[1] else False
    setter = True if IDLMethod.Special.Setter in p[1] else False
    creator = True if IDLMethod.Special.Creator in p[1] else False
    deleter = True if IDLMethod.Special.Deleter in p[1] else False
    legacycaller = True if IDLMethod.Special.LegacyCaller in p[1] else False

    if getter or deleter:
        if setter or creator:
            raise WebIDLError("getter and deleter are incompatible with setter and creator",
                              [self.getLocation(p, 1)])

    (returnType, identifier, arguments) = p[2]

    assert isinstance(returnType, IDLType)

    specialType = IDLMethod.NamedOrIndexed.Neither

    if getter or deleter:
        # A special getter/deleter takes exactly one argument: a DOMString
        # (named) or an unsigned long (indexed) key.
        if len(arguments) != 1:
            raise WebIDLError("%s has wrong number of arguments" %
                              ("getter" if getter else "deleter"),
                              [self.getLocation(p, 2)])
        argType = arguments[0].type
        if argType == BuiltinTypes[IDLBuiltinType.Types.domstring]:
            specialType = IDLMethod.NamedOrIndexed.Named
        elif argType == BuiltinTypes[IDLBuiltinType.Types.unsigned_long]:
            specialType = IDLMethod.NamedOrIndexed.Indexed
            if deleter:
                raise WebIDLError("There is no such thing as an indexed deleter.",
                                  [self.getLocation(p, 1)])
        else:
            raise WebIDLError("%s has wrong argument type (must be DOMString or UnsignedLong)" %
                              ("getter" if getter else "deleter"),
                              [arguments[0].location])
        if arguments[0].optional or arguments[0].variadic:
            raise WebIDLError("%s cannot have %s argument" %
                              ("getter" if getter else "deleter",
                               "optional" if arguments[0].optional else "variadic"),
                              [arguments[0].location])
        if getter:
            if returnType.isVoid():
                raise WebIDLError("getter cannot have void return type",
                                  [self.getLocation(p, 2)])
    if setter or creator:
        # A special setter/creator takes exactly two arguments: the key
        # (DOMString or unsigned long) followed by the value.
        if len(arguments) != 2:
            raise WebIDLError("%s has wrong number of arguments" %
                              ("setter" if setter else "creator"),
                              [self.getLocation(p, 2)])
        argType = arguments[0].type
        if argType == BuiltinTypes[IDLBuiltinType.Types.domstring]:
            specialType = IDLMethod.NamedOrIndexed.Named
        elif argType == BuiltinTypes[IDLBuiltinType.Types.unsigned_long]:
            specialType = IDLMethod.NamedOrIndexed.Indexed
        else:
            raise WebIDLError("%s has wrong argument type (must be DOMString or UnsignedLong)" %
                              ("setter" if setter else "creator"),
                              [arguments[0].location])
        if arguments[0].optional or arguments[0].variadic:
            raise WebIDLError("%s cannot have %s argument" %
                              ("setter" if setter else "creator",
                               "optional" if arguments[0].optional else "variadic"),
                              [arguments[0].location])
        if arguments[1].optional or arguments[1].variadic:
            raise WebIDLError("%s cannot have %s argument" %
                              ("setter" if setter else "creator",
                               "optional" if arguments[1].optional else "variadic"),
                              [arguments[1].location])

    if stringifier:
        if len(arguments) != 0:
            raise WebIDLError("stringifier has wrong number of arguments",
                              [self.getLocation(p, 2)])
        if not returnType.isDOMString():
            raise WebIDLError("stringifier must have DOMString return type",
                              [self.getLocation(p, 2)])

    # identifier might be None.  This is only permitted for special methods.
    if not identifier:
        if (not getter and not setter and not creator and
            not deleter and not legacycaller and not stringifier):
            raise WebIDLError("Identifier required for non-special methods",
                              [self.getLocation(p, 2)])

        location = BuiltinLocation("<auto-generated-identifier>")
        # Synthesize a reserved "__"-prefixed name describing which special
        # operations this method implements.
        identifier = IDLUnresolvedIdentifier(
            location,
            "__%s%s%s%s%s%s%s" %
            ("named" if specialType == IDLMethod.NamedOrIndexed.Named else
             "indexed" if specialType == IDLMethod.NamedOrIndexed.Indexed else "",
             "getter" if getter else "",
             "setter" if setter else "",
             "deleter" if deleter else "",
             "creator" if creator else "",
             "legacycaller" if legacycaller else "",
             "stringifier" if stringifier else ""),
            allowDoubleUnderscore=True)

    method = IDLMethod(self.getLocation(p, 2), identifier, returnType, arguments,
                       static=static, getter=getter, setter=setter, creator=creator,
                       deleter=deleter, specialType=specialType,
                       legacycaller=legacycaller, stringifier=stringifier)
    p[0] = method
def p_Stringifier(self, p):
"""
Operation : STRINGIFIER SEMICOLON
"""
identifier = IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"),
"__stringifier",
allowDoubleUnderscore=True)
method = IDLMethod(self.getLocation(p, 1),
identifier,
returnType=BuiltinTypes[IDLBuiltinType.Types.domstring],
arguments=[],
stringifier=True)
p[0] = method
def p_Jsonifier(self, p):
"""
Operation : JSONIFIER SEMICOLON
"""
identifier = IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"),
"__jsonifier", allowDoubleUnderscore=True)
method = IDLMethod(self.getLocation(p, 1),
identifier,
returnType=BuiltinTypes[IDLBuiltinType.Types.object],
arguments=[],
jsonifier=True)
p[0] = method
def p_QualifierStatic(self, p):
"""
Qualifier : STATIC
"""
p[0] = [IDLInterfaceMember.Special.Static]
def p_QualifierStringifier(self, p):
"""
Qualifier : STRINGIFIER
"""
p[0] = [IDLInterfaceMember.Special.Stringifier]
def p_Qualifiers(self, p):
"""
Qualifiers : Qualifier
| Specials
"""
p[0] = p[1]
def p_Specials(self, p):
"""
Specials : Special Specials
"""
p[0] = [p[1]]
p[0].extend(p[2])
def p_SpecialsEmpty(self, p):
"""
Specials :
"""
p[0] = []
def p_SpecialGetter(self, p):
"""
Special : GETTER
"""
p[0] = IDLMethod.Special.Getter
def p_SpecialSetter(self, p):
"""
Special : SETTER
"""
p[0] = IDLMethod.Special.Setter
def p_SpecialCreator(self, p):
"""
Special : CREATOR
"""
p[0] = IDLMethod.Special.Creator
def p_SpecialDeleter(self, p):
"""
Special : DELETER
"""
p[0] = IDLMethod.Special.Deleter
def p_SpecialLegacyCaller(self, p):
"""
Special : LEGACYCALLER
"""
p[0] = IDLMethod.Special.LegacyCaller
def p_OperationRest(self, p):
"""
OperationRest : ReturnType OptionalIdentifier LPAREN ArgumentList RPAREN SEMICOLON
"""
p[0] = (p[1], p[2], p[4])
def p_OptionalIdentifier(self, p):
"""
OptionalIdentifier : IDENTIFIER
"""
p[0] = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
def p_OptionalIdentifierEmpty(self, p):
"""
OptionalIdentifier :
"""
pass
def p_ArgumentList(self, p):
"""
ArgumentList : Argument Arguments
"""
p[0] = [p[1]] if p[1] else []
p[0].extend(p[2])
def p_ArgumentListEmpty(self, p):
"""
ArgumentList :
"""
p[0] = []
def p_Arguments(self, p):
"""
Arguments : COMMA Argument Arguments
"""
p[0] = [p[2]] if p[2] else []
p[0].extend(p[3])
def p_ArgumentsEmpty(self, p):
"""
Arguments :
"""
p[0] = []
def p_Argument(self, p):
"""
Argument : ExtendedAttributeList Optional Type Ellipsis ArgumentName Default
"""
t = p[3]
assert isinstance(t, IDLType)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 5), p[5])
optional = p[2]
variadic = p[4]
defaultValue = p[6]
if not optional and defaultValue:
raise WebIDLError("Mandatory arguments can't have a default value.",
[self.getLocation(p, 6)])
# We can't test t.isAny() here and give it a default value as needed,
# since at this point t is not a fully resolved type yet (e.g. it might
# be a typedef). We'll handle the 'any' case in IDLArgument.complete.
if variadic:
if optional:
raise WebIDLError("Variadic arguments should not be marked optional.",
[self.getLocation(p, 2)])
optional = variadic
p[0] = IDLArgument(self.getLocation(p, 5), identifier, t, optional, defaultValue, variadic)
p[0].addExtendedAttributes(p[1])
def p_ArgumentName(self, p):
"""
ArgumentName : IDENTIFIER
| ATTRIBUTE
| CALLBACK
| CONST
| CREATOR
| DELETER
| DICTIONARY
| ENUM
| EXCEPTION
| GETTER
| IMPLEMENTS
| INHERIT
| INTERFACE
| ITERABLE
| LEGACYCALLER
| MAPLIKE
| PARTIAL
| REQUIRED
| SERIALIZER
| SETLIKE
| SETTER
| STATIC
| STRINGIFIER
| JSONIFIER
| TYPEDEF
| UNRESTRICTED
| NAMESPACE
"""
p[0] = p[1]
def p_AttributeName(self, p):
"""
AttributeName : IDENTIFIER
| REQUIRED
"""
p[0] = p[1]
def p_Optional(self, p):
"""
Optional : OPTIONAL
"""
p[0] = True
def p_OptionalEmpty(self, p):
"""
Optional :
"""
p[0] = False
def p_Required(self, p):
"""
Required : REQUIRED
"""
p[0] = True
def p_RequiredEmpty(self, p):
"""
Required :
"""
p[0] = False
def p_Ellipsis(self, p):
"""
Ellipsis : ELLIPSIS
"""
p[0] = True
def p_EllipsisEmpty(self, p):
"""
Ellipsis :
"""
p[0] = False
def p_ExceptionMember(self, p):
"""
ExceptionMember : Const
| ExceptionField
"""
pass
def p_ExceptionField(self, p):
"""
ExceptionField : Type IDENTIFIER SEMICOLON
"""
pass
def p_ExtendedAttributeList(self, p):
    """
    ExtendedAttributeList : LBRACKET ExtendedAttribute ExtendedAttributes RBRACKET
    """
    # First attribute plus the comma-separated tail (tail may be falsy).
    p[0] = [p[2]] + (p[3] or [])
def p_ExtendedAttributeListEmpty(self, p):
"""
ExtendedAttributeList :
"""
p[0] = []
def p_ExtendedAttribute(self, p):
"""
ExtendedAttribute : ExtendedAttributeNoArgs
| ExtendedAttributeArgList
| ExtendedAttributeIdent
| ExtendedAttributeNamedArgList
| ExtendedAttributeIdentList
"""
p[0] = IDLExtendedAttribute(self.getLocation(p, 1), p[1])
def p_ExtendedAttributeEmpty(self, p):
"""
ExtendedAttribute :
"""
pass
def p_ExtendedAttributes(self, p):
"""
ExtendedAttributes : COMMA ExtendedAttribute ExtendedAttributes
"""
p[0] = [p[2]] if p[2] else []
p[0].extend(p[3])
def p_ExtendedAttributesEmpty(self, p):
"""
ExtendedAttributes :
"""
p[0] = []
def p_Other(self, p):
"""
Other : INTEGER
| FLOATLITERAL
| IDENTIFIER
| STRING
| OTHER
| ELLIPSIS
| COLON
| SCOPE
| SEMICOLON
| LT
| EQUALS
| GT
| QUESTIONMARK
| DATE
| DOMSTRING
| BYTESTRING
| USVSTRING
| ANY
| ATTRIBUTE
| BOOLEAN
| BYTE
| LEGACYCALLER
| CONST
| CREATOR
| DELETER
| DOUBLE
| EXCEPTION
| FALSE
| FLOAT
| GETTER
| IMPLEMENTS
| INHERIT
| INTERFACE
| LONG
| MODULE
| NULL
| OBJECT
| OCTET
| OPTIONAL
| SEQUENCE
| RECORD
| SETTER
| SHORT
| STATIC
| STRINGIFIER
| JSONIFIER
| TRUE
| TYPEDEF
| UNSIGNED
| VOID
"""
pass
def p_OtherOrComma(self, p):
"""
OtherOrComma : Other
| COMMA
"""
pass
def p_TypeSingleType(self, p):
"""
Type : SingleType
"""
p[0] = p[1]
def p_TypeUnionType(self, p):
"""
Type : UnionType Null
"""
p[0] = self.handleNullable(p[1], p[2])
def p_SingleTypeNonAnyType(self, p):
"""
SingleType : NonAnyType
"""
p[0] = p[1]
def p_SingleTypeAnyType(self, p):
"""
SingleType : ANY
"""
p[0] = BuiltinTypes[IDLBuiltinType.Types.any]
def p_UnionType(self, p):
    """
    UnionType : LPAREN UnionMemberType OR UnionMemberType UnionMemberTypes RPAREN
    """
    # A union has at least two member types; any further members come from
    # the UnionMemberTypes tail.
    p[0] = IDLUnionType(self.getLocation(p, 1), [p[2], p[4]] + p[5])
def p_UnionMemberTypeNonAnyType(self, p):
"""
UnionMemberType : NonAnyType
"""
p[0] = p[1]
def p_UnionMemberType(self, p):
"""
UnionMemberType : UnionType Null
"""
p[0] = self.handleNullable(p[1], p[2])
def p_UnionMemberTypes(self, p):
"""
UnionMemberTypes : OR UnionMemberType UnionMemberTypes
"""
p[0] = [p[2]]
p[0].extend(p[3])
def p_UnionMemberTypesEmpty(self, p):
"""
UnionMemberTypes :
"""
p[0] = []
def p_NonAnyType(self, p):
"""
NonAnyType : PrimitiveType Null
| ARRAYBUFFER Null
| SHAREDARRAYBUFFER Null
| OBJECT Null
"""
if p[1] == "object":
type = BuiltinTypes[IDLBuiltinType.Types.object]
elif p[1] == "ArrayBuffer":
type = BuiltinTypes[IDLBuiltinType.Types.ArrayBuffer]
elif p[1] == "SharedArrayBuffer":
type = BuiltinTypes[IDLBuiltinType.Types.SharedArrayBuffer]
else:
type = BuiltinTypes[p[1]]
p[0] = self.handleNullable(type, p[2])
def p_NonAnyTypeStringType(self, p):
"""
NonAnyType : StringType Null
"""
p[0] = self.handleNullable(p[1], p[2])
def p_NonAnyTypeSequenceType(self, p):
"""
NonAnyType : SEQUENCE LT Type GT Null
"""
innerType = p[3]
type = IDLSequenceType(self.getLocation(p, 1), innerType)
p[0] = self.handleNullable(type, p[5])
# Note: Promise<void> is allowed, so we want to parametrize on ReturnType,
# not Type. Promise types can't be null, hence no "Null" in there.
def p_NonAnyTypePromiseType(self, p):
"""
NonAnyType : PROMISE LT ReturnType GT
"""
p[0] = IDLPromiseType(self.getLocation(p, 1), p[3])
def p_NonAnyTypeRecordType(self, p):
"""
NonAnyType : RECORD LT StringType COMMA Type GT Null
"""
keyType = p[3]
valueType = p[5]
type = IDLRecordType(self.getLocation(p, 1), keyType, valueType)
p[0] = self.handleNullable(type, p[7])
def p_NonAnyTypeScopedName(self, p):
    """
    NonAnyType : ScopedName Null
    """
    assert isinstance(p[1], IDLUnresolvedIdentifier)

    if p[1].name == "Promise":
        raise WebIDLError("Promise used without saying what it's "
                          "parametrized over",
                          [self.getLocation(p, 1)])

    type = None

    try:
        # Single lookup (the old code performed the same scope lookup twice).
        obj = self.globalScope()._lookupIdentifier(p[1])
        if obj:
            assert not obj.isType()
            if obj.isTypedef():
                type = IDLTypedefType(self.getLocation(p, 1), obj.innerType,
                                      obj.identifier.name)
            elif obj.isCallback() and not obj.isInterface():
                type = IDLCallbackType(obj.location, obj)
            else:
                type = IDLWrapperType(self.getLocation(p, 1), p[1])
            p[0] = self.handleNullable(type, p[2])
            return
    except Exception:
        # A failed lookup just means the name isn't resolvable yet; fall
        # through to the unresolved-type path.  (Was a bare "except:",
        # which also swallowed KeyboardInterrupt/SystemExit.)
        pass

    # Name not known (yet): defer resolution to a later pass.
    type = IDLUnresolvedType(self.getLocation(p, 1), p[1])
    p[0] = self.handleNullable(type, p[2])
def p_NonAnyTypeDate(self, p):
"""
NonAnyType : DATE Null
"""
p[0] = self.handleNullable(BuiltinTypes[IDLBuiltinType.Types.date],
p[2])
def p_ConstType(self, p):
"""
ConstType : PrimitiveType Null
"""
type = BuiltinTypes[p[1]]
p[0] = self.handleNullable(type, p[2])
def p_ConstTypeIdentifier(self, p):
"""
ConstType : IDENTIFIER Null
"""
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
type = IDLUnresolvedType(self.getLocation(p, 1), identifier)
p[0] = self.handleNullable(type, p[2])
def p_PrimitiveTypeUint(self, p):
"""
PrimitiveType : UnsignedIntegerType
"""
p[0] = p[1]
def p_PrimitiveTypeBoolean(self, p):
"""
PrimitiveType : BOOLEAN
"""
p[0] = IDLBuiltinType.Types.boolean
def p_PrimitiveTypeByte(self, p):
"""
PrimitiveType : BYTE
"""
p[0] = IDLBuiltinType.Types.byte
def p_PrimitiveTypeOctet(self, p):
"""
PrimitiveType : OCTET
"""
p[0] = IDLBuiltinType.Types.octet
def p_PrimitiveTypeFloat(self, p):
"""
PrimitiveType : FLOAT
"""
p[0] = IDLBuiltinType.Types.float
def p_PrimitiveTypeUnrestictedFloat(self, p):
"""
PrimitiveType : UNRESTRICTED FLOAT
"""
p[0] = IDLBuiltinType.Types.unrestricted_float
def p_PrimitiveTypeDouble(self, p):
"""
PrimitiveType : DOUBLE
"""
p[0] = IDLBuiltinType.Types.double
def p_PrimitiveTypeUnrestictedDouble(self, p):
"""
PrimitiveType : UNRESTRICTED DOUBLE
"""
p[0] = IDLBuiltinType.Types.unrestricted_double
def p_StringType(self, p):
"""
StringType : BuiltinStringType
"""
p[0] = BuiltinTypes[p[1]]
def p_BuiltinStringTypeDOMString(self, p):
"""
BuiltinStringType : DOMSTRING
"""
p[0] = IDLBuiltinType.Types.domstring
def p_BuiltinStringTypeBytestring(self, p):
"""
BuiltinStringType : BYTESTRING
"""
p[0] = IDLBuiltinType.Types.bytestring
def p_BuiltinStringTypeUSVString(self, p):
"""
BuiltinStringType : USVSTRING
"""
p[0] = IDLBuiltinType.Types.usvstring
def p_UnsignedIntegerTypeUnsigned(self, p):
"""
UnsignedIntegerType : UNSIGNED IntegerType
"""
# Adding one to a given signed integer type gets you the unsigned type:
p[0] = p[2] + 1
def p_UnsignedIntegerType(self, p):
"""
UnsignedIntegerType : IntegerType
"""
p[0] = p[1]
def p_IntegerTypeShort(self, p):
"""
IntegerType : SHORT
"""
p[0] = IDLBuiltinType.Types.short
def p_IntegerTypeLong(self, p):
    """
    IntegerType : LONG OptionalLong
    """
    # "long long" when the OptionalLong slot matched, plain "long" otherwise.
    p[0] = (IDLBuiltinType.Types.long_long if p[2]
            else IDLBuiltinType.Types.long)
def p_OptionalLong(self, p):
"""
OptionalLong : LONG
"""
p[0] = True
def p_OptionalLongEmpty(self, p):
"""
OptionalLong :
"""
p[0] = False
def p_Null(self, p):
    """
    Null : QUESTIONMARK
         |
    """
    # Yield the "?" token's location when present, else None.  The location
    # doubles as the presence flag consumed by handleNullable().
    p[0] = self.getLocation(p, 1) if len(p) > 1 else None
def p_ReturnTypeType(self, p):
"""
ReturnType : Type
"""
p[0] = p[1]
def p_ReturnTypeVoid(self, p):
"""
ReturnType : VOID
"""
p[0] = BuiltinTypes[IDLBuiltinType.Types.void]
def p_ScopedName(self, p):
"""
ScopedName : AbsoluteScopedName
| RelativeScopedName
"""
p[0] = p[1]
def p_AbsoluteScopedName(self, p):
"""
AbsoluteScopedName : SCOPE IDENTIFIER ScopedNameParts
"""
assert False
pass
def p_RelativeScopedName(self, p):
"""
RelativeScopedName : IDENTIFIER ScopedNameParts
"""
assert not p[2] # Not implemented!
p[0] = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
def p_ScopedNameParts(self, p):
"""
ScopedNameParts : SCOPE IDENTIFIER ScopedNameParts
"""
assert False
pass
def p_ScopedNamePartsEmpty(self, p):
"""
ScopedNameParts :
"""
p[0] = None
def p_ExtendedAttributeNoArgs(self, p):
"""
ExtendedAttributeNoArgs : IDENTIFIER
"""
p[0] = (p[1],)
def p_ExtendedAttributeArgList(self, p):
"""
ExtendedAttributeArgList : IDENTIFIER LPAREN ArgumentList RPAREN
"""
p[0] = (p[1], p[3])
def p_ExtendedAttributeIdent(self, p):
"""
ExtendedAttributeIdent : IDENTIFIER EQUALS STRING
| IDENTIFIER EQUALS IDENTIFIER
"""
p[0] = (p[1], p[3])
def p_ExtendedAttributeNamedArgList(self, p):
"""
ExtendedAttributeNamedArgList : IDENTIFIER EQUALS IDENTIFIER LPAREN ArgumentList RPAREN
"""
p[0] = (p[1], p[3], p[5])
def p_ExtendedAttributeIdentList(self, p):
"""
ExtendedAttributeIdentList : IDENTIFIER EQUALS LPAREN IdentifierList RPAREN
"""
p[0] = (p[1], p[4])
def p_IdentifierList(self, p):
    """
    IdentifierList : IDENTIFIER Identifiers
    """
    # The first identifier followed by the comma-separated tail.
    p[0] = [p[1]] + list(p[2])
def p_IdentifiersList(self, p):
    """
    Identifiers : COMMA IDENTIFIER Identifiers
    """
    # The identifier after the comma, then the rest of the tail.
    p[0] = [p[2]] + list(p[3])
def p_IdentifiersEmpty(self, p):
"""
Identifiers :
"""
p[0] = []
def p_error(self, p):
    """Report a syntax error at token *p*, or at end of file when *p* is None."""
    if p:
        raise WebIDLError("invalid syntax",
                          [Location(self.lexer, p.lineno, p.lexpos,
                                    self._filename)])
    raise WebIDLError("Syntax Error at end of file. Possibly due to "
                      "missing semicolon(;), braces(}) or both",
                      [self._filename])
def __init__(self, outputdir='', lexer=None):
    """Build the yacc parser and preload the builtin IDL definitions.

    outputdir -- directory where ply writes/caches its parse tables.
    lexer -- optional existing lexer to reuse (see reset()).
    """
    Tokenizer.__init__(self, outputdir, lexer)

    # The custom logger collects grammar conflicts so they can be reported
    # as errors even if yacc.yacc() itself raises.
    logger = SqueakyCleanLogger()
    try:
        self.parser = yacc.yacc(module=self,
                                outputdir=outputdir,
                                tabmodule='webidlyacc',
                                errorlog=logger,
                                debug=False
                                # Pickling the grammar is a speedup in
                                # some cases (older Python?) but a
                                # significant slowdown in others.
                                # We're not pickling for now, until it
                                # becomes a speedup again.
                                # , picklefile='WebIDLGrammar.pkl'
                                )
    finally:
        logger.reportGrammarErrors()

    self._globalScope = IDLScope(BuiltinLocation("<Global Scope>"), None, None)
    # To make our test harness work, pretend like we have a primary global already.
    # Note that we _don't_ set _globalScope.primaryGlobalAttr,
    # so we'll still be able to detect multiple PrimaryGlobal extended attributes.
    self._globalScope.primaryGlobalName = "FakeTestPrimaryGlobal"
    self._globalScope.globalNames.add("FakeTestPrimaryGlobal")
    self._globalScope.globalNameMapping["FakeTestPrimaryGlobal"].add("FakeTestPrimaryGlobal")
    # And we add the special-cased "System" global name, which
    # doesn't have any corresponding interfaces.
    self._globalScope.globalNames.add("System")
    self._globalScope.globalNameMapping["System"].add("BackstagePass")
    self._installBuiltins(self._globalScope)
    self._productions = []

    # Parse the builtin typedefs (Parser._builtins) so they are always in
    # scope; the filename markers bracket the synthetic input for error
    # reporting.
    self._filename = "<builtin>"
    self.lexer.input(Parser._builtins)
    self._filename = None

    self.parser.parse(lexer=self.lexer, tracking=True)
def _installBuiltins(self, scope):
    """Register the builtin buffer/typed-array typedefs in *scope*."""
    assert isinstance(scope, IDLScope)

    # xrange omits the last value.
    for x in xrange(IDLBuiltinType.Types.ArrayBuffer, IDLBuiltinType.Types.Float64Array + 1):
        builtin = BuiltinTypes[x]
        name = builtin.name
        # Constructed for its side effect — presumably IDLTypedef registers
        # itself in *scope* (the local binding is otherwise unused); confirm.
        typedef = IDLTypedef(BuiltinLocation("<builtin type>"), scope, builtin, name)
@ staticmethod
def handleNullable(type, questionMarkLocation):
    # questionMarkLocation is the location of the "?" token, or None when
    # the type is not nullable (see p_Null); wrap only in the former case.
    if questionMarkLocation is not None:
        type = IDLNullableType(questionMarkLocation, type)

    return type
def parse(self, t, filename=None):
    """Parse WebIDL text *t* and accumulate its productions.

    *filename* is recorded only for error locations and is reset to None
    afterwards so a stale name can't leak into later parses.
    """
    self.lexer.input(t)

    # for tok in iter(self.lexer.token, None):
    #     print tok

    self._filename = filename
    self._productions.extend(self.parser.parse(lexer=self.lexer, tracking=True))
    self._filename = None
def finish(self):
    """Resolve, finish and validate every parsed production.

    Returns the de-duplicated production list, preserving original order.
    """
    # If we have interfaces that are iterable, create their
    # iterator interfaces and add them to the productions array.
    interfaceStatements = []
    for p in self._productions:
        if isinstance(p, IDLInterface):
            interfaceStatements.append(p)
            if p.identifier.name == "Navigator":
                navigatorInterface = p

    # NOTE(review): iterableIteratorIface is never read below — dead local?
    iterableIteratorIface = None
    for iface in interfaceStatements:
        navigatorProperty = iface.getNavigatorProperty()
        if navigatorProperty:
            # We're generating a partial interface to add a readonly
            # property to the Navigator interface for every interface
            # annotated with NavigatorProperty.
            # NOTE(review): navigatorInterface is only bound when a
            # "Navigator" interface was parsed above; a NavigatorProperty
            # without one raises NameError here — confirm that's intended.
            partialInterface = IDLPartialInterfaceOrNamespace(
                iface.location,
                IDLUnresolvedIdentifier(iface.location, "Navigator"),
                [ navigatorProperty ],
                navigatorInterface)
            self._productions.append(partialInterface)

        iterable = None
        # We haven't run finish() on the interface yet, so we don't know
        # whether our interface is maplike/setlike/iterable or not. This
        # means we have to loop through the members to see if we have an
        # iterable member.
        for m in iface.members:
            if isinstance(m, IDLIterable):
                iterable = m
                break
        if iterable and iterable.isPairIterator():
            def simpleExtendedAttr(str):
                return IDLExtendedAttribute(iface.location, (str, ))
            # Pair iterators get a synthetic <Name>Iterator interface with a
            # single next() method.
            nextMethod = IDLMethod(
                iface.location,
                IDLUnresolvedIdentifier(iface.location, "next"),
                BuiltinTypes[IDLBuiltinType.Types.object], [])
            nextMethod.addExtendedAttributes([simpleExtendedAttr("Throws")])
            itr_ident = IDLUnresolvedIdentifier(iface.location,
                                                iface.identifier.name + "Iterator")
            itr_iface = IDLInterface(iface.location, self.globalScope(),
                                     itr_ident, None, [nextMethod],
                                     isKnownNonPartial=True)
            itr_iface.addExtendedAttributes([simpleExtendedAttr("NoInterfaceObject")])
            # Make sure the exposure set for the iterator interface is the
            # same as the exposure set for the iterable interface, because
            # we're going to generate methods on the iterable that return
            # instances of the iterator.
            itr_iface._exposureGlobalNames = set(iface._exposureGlobalNames)
            # Always append generated iterable interfaces after the
            # interface they're a member of, otherwise nativeType generation
            # won't work correctly.
            itr_iface.iterableInterface = iface
            self._productions.append(itr_iface)
            iterable.iteratorType = IDLWrapperType(iface.location, itr_iface)

    # Then, finish all the IDLImplementsStatements.  In particular, we
    # have to make sure we do those before we do the IDLInterfaces.
    # XXX khuey hates this bit and wants to nuke it from orbit.
    implementsStatements = [p for p in self._productions if
                            isinstance(p, IDLImplementsStatement)]
    otherStatements = [p for p in self._productions if
                       not isinstance(p, IDLImplementsStatement)]
    for production in implementsStatements:
        production.finish(self.globalScope())
    for production in otherStatements:
        production.finish(self.globalScope())

    # Do any post-finish validation we need to do
    for production in self._productions:
        production.validate()

    # De-duplicate self._productions, without modifying its order.
    seen = set()
    result = []
    for p in self._productions:
        if p not in seen:
            seen.add(p)
            result.append(p)
    return result
def reset(self):
    """Return a fresh Parser that reuses this one's lexer (drops all state)."""
    return Parser(lexer=self.lexer)
# Builtin IDL defined by WebIDL
_builtins = """
typedef unsigned long long DOMTimeStamp;
typedef (ArrayBufferView or ArrayBuffer) BufferSource;
"""
def main():
    """Command-line driver: parse the given WebIDL files and report errors.

    Exits via OptionParser.error() when no files are given.
    """
    # Parse arguments.
    from optparse import OptionParser
    usageString = "usage: %prog [options] files"
    o = OptionParser(usage=usageString)
    o.add_option("--cachedir", dest='cachedir', default=None,
                 help="Directory in which to cache lex/parse tables.")
    o.add_option("--verbose-errors", action='store_true', default=False,
                 help="When an error happens, display the Python traceback.")
    (options, args) = o.parse_args()

    if len(args) < 1:
        o.error(usageString)

    fileList = args
    baseDir = os.getcwd()

    # Parse the WebIDL.
    parser = Parser(options.cachedir)
    try:
        for filename in fileList:
            fullPath = os.path.normpath(os.path.join(baseDir, filename))
            # Close the handle even if readlines() raises (the old code
            # leaked it in that case).
            f = open(fullPath, 'rb')
            try:
                lines = f.readlines()
            finally:
                f.close()
            print(fullPath)
            parser.parse(''.join(lines), fullPath)
        parser.finish()
    except WebIDLError as e:  # "as" form: valid from Python 2.6, py3-ready
        if options.verbose_errors:
            traceback.print_exc()
        else:
            print(e)
if __name__ == '__main__':
main()
| mpl-2.0 |
blackball/an-test6 | pyspherematch-only/libkd/setup.py | 1 | 1350 | from distutils.core import setup, Extension
import numpy
import os.path
# Locate numpy's bundled C headers so the extension can compile against them.
numpy_inc = (os.path.dirname(numpy.__file__) +
             '/core/include/numpy')
# C extension wrapping libkd's kd-tree matching code (pyspherematch.c);
# links against prebuilt static libraries from the Astrometry.net tree.
c_module = Extension('spherematch_c',
                     sources = ['pyspherematch.c'],
                     include_dirs = [ numpy_inc,
                                      '../qfits-an/include',
                                      '../util', '.', ],
                     # extra_objects = ['libkd-noio.a',
                     extra_objects = ['libkd.a',
                                      '../util/libanfiles.a',
                                      '../util/libanutils.a',
                                      '../qfits-an/lib/libqfits.a',
                                      ])
setup(name = 'pyspherematch',
      ext_package='pyspherematch',
      version = '0.3',
      description = 'This package finds near neighbours in two sets of points. Stand-alone python lib, extracted from Astrometry.net',
      author = 'Astrometry.net (Dustin Lang), python release by Sjoert van Velzen',
      author_email = 'dstn@cs.toronto.edu, s.vanvelzen@astro.ru.nl',
      url = 'http://astrometry.net',
      ext_modules = [c_module],
      packages=['pyspherematch', 'pyspherematch/util'],
      package_dir={'pyspherematch': '.', 'pyspherematch/util': '../util/'})
| gpl-2.0 |
alexsavio/scikit-learn | sklearn/learning_curve.py | 7 | 15161 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the functions are moved."
" This module will be removed in 0.20",
DeprecationWarning)
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
                   cv=None, scoring=None, exploit_incremental_learning=False,
                   n_jobs=1, pre_dispatch="all", verbose=0,
                   error_score='raise'):
    """Learning curve.
    Determines cross-validated training and test scores for different training
    set sizes.
    A cross-validation generator splits the whole dataset k times in training
    and test data. Subsets of the training set with varying sizes will be used
    to train the estimator and a score for each training subset size and the
    test set will be computed. Afterwards, the scores will be averaged over
    all k runs for each training subset size.
    Read more in the :ref:`User Guide <learning_curves>`.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    train_sizes : array-like, shape (n_ticks,), dtype float or int
        Relative or absolute numbers of training examples that will be used to
        generate the learning curve. If the dtype is float, it is regarded as a
        fraction of the maximum size of the training set (that is determined
        by the selected validation method), i.e. it has to be within (0, 1].
        Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually have to
        be big enough to contain at least one sample from each class.
        (default: np.linspace(0.1, 1.0, 5))
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass,
        :class:`sklearn.model_selection.StratifiedKFold` is used. In all
        other cases, :class:`sklearn.model_selection.KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    exploit_incremental_learning : boolean, optional, default: False
        If the estimator supports incremental learning, this will be
        used to speed up fitting for different training set sizes.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The string can
        be an expression like '2*n_jobs'.
    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    Returns
    -------
    train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that has been used to generate the
        learning curve. Note that the number of ticks might be less
        than n_ticks because duplicate entries will be removed.
    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.
    Notes
    -----
    See :ref:`examples/model_selection/plot_learning_curve.py
    <sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
    """
    if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
        raise ValueError("An estimator must support the partial_fit interface "
                         "to exploit incremental learning")
    X, y = indexable(X, y)
    # Make a list since we will be iterating multiple times over the folds
    cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
    scorer = check_scoring(estimator, scoring=scoring)
    # HACK as long as boolean indices are allowed in cv generators
    # Convert boolean fold masks into integer index arrays so that the
    # `train[:n_train_samples]` prefix slicing below is meaningful.
    if cv[0][0].dtype == bool:
        new_cv = []
        for i in range(len(cv)):
            new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
        cv = new_cv
    # Largest usable training-set size is taken from the first fold.
    n_max_training_samples = len(cv[0][0])
    # Because the lengths of folds can be significantly different, it is
    # not guaranteed that we use all of the available training data when we
    # use the first 'n_max_training_samples' samples.
    train_sizes_abs = _translate_train_sizes(train_sizes,
                                             n_max_training_samples)
    n_unique_ticks = train_sizes_abs.shape[0]
    if verbose > 0:
        print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    if exploit_incremental_learning:
        # One job per fold; each job walks all training sizes via partial_fit.
        classes = np.unique(y) if is_classifier(estimator) else None
        out = parallel(delayed(_incremental_fit_estimator)(
            clone(estimator), X, y, classes, train, test, train_sizes_abs,
            scorer, verbose) for train, test in cv)
    else:
        # One independent fit per (fold, training size) combination.
        out = parallel(delayed(_fit_and_score)(
            clone(estimator), X, y, scorer, train[:n_train_samples], test,
            verbose, parameters=None, fit_params=None, return_train_score=True,
            error_score=error_score)
            for train, test in cv for n_train_samples in train_sizes_abs)
        # Keep only (train_score, test_score); reshape flat job results
        # into (n_folds, n_ticks, 2).
        out = np.array(out)[:, :2]
        n_cv_folds = out.shape[0] // n_unique_ticks
        out = out.reshape(n_cv_folds, n_unique_ticks, 2)
    # -> (2, n_ticks, n_folds); out[0] is train scores, out[1] test scores.
    out = np.asarray(out).transpose((2, 1, 0))
    return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
                               train_sizes, scorer, verbose):
    """Grow the training set prefix-by-prefix with ``partial_fit`` and
    record (train, test) scores after every increment."""
    scores = []
    # Chunks of *new* samples between consecutive training sizes; the
    # trailing split (samples beyond the last size) is discarded.
    new_chunks = np.split(train, train_sizes)[:-1]
    for n_samples, chunk in zip(train_sizes, new_chunks):
        seen = train[:n_samples]
        X_seen, y_seen = _safe_split(estimator, X, y, seen)
        X_chunk, y_chunk = _safe_split(estimator, X, y, chunk)
        X_test, y_test = _safe_split(estimator, X, y, test, seen)
        # Unsupervised estimators take no target argument.
        if y_chunk is None:
            estimator.partial_fit(X_chunk, classes=classes)
        else:
            estimator.partial_fit(X_chunk, y_chunk, classes=classes)
        scores.append((_score(estimator, X_seen, y_seen, scorer),
                       _score(estimator, X_test, y_test, scorer)))
    # Shape (n_ticks, 2): column 0 train scores, column 1 test scores.
    return np.array(scores)
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
                     scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
    """Validation curve.

    Compute cross-validated training and test scores for an estimator while
    one parameter sweeps over ``param_range``.  This mirrors a grid search
    over a single parameter, but additionally records training-set scores so
    the results can be plotted.

    Read more in the :ref:`User Guide <validation_curve>`.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    param_name : string
        Name of the parameter that will be varied.
    param_range : array-like, shape (n_values,)
        The values of the parameter that will be evaluated.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy; None means the
        default 3-fold cross-validation.  See the
        :ref:`User Guide <cross_validation>` for the available strategies.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or a scorer callable
        with signature ``scorer(estimator, X, y)``.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default "all").
        Can be an expression like '2*n_jobs' to reduce allocated memory.
    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.

    Returns
    -------
    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.

    Notes
    -----
    See
    :ref:`examples/model_selection/plot_validation_curve.py
    <sphx_glr_auto_examples_model_selection_plot_validation_curve.py>`
    """
    X, y = indexable(X, y)
    cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    # One fit per (fold, parameter value); train scores are requested too.
    fit_jobs = (delayed(_fit_and_score)(
        estimator, X, y, scorer, train, test, verbose,
        parameters={param_name: value}, fit_params=None,
        return_train_score=True)
        for train, test in cv for value in param_range)
    out = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                   verbose=verbose)(fit_jobs)
    # Keep only (train_score, test_score), then rearrange the flat job list
    # into two (n_params, n_cv_folds) arrays.
    scores = np.asarray(out)[:, :2]
    n_params = len(param_range)
    n_cv_folds = scores.shape[0] // n_params
    scores = scores.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
    return scores[0], scores[1]
| bsd-3-clause |
sergiyprotsiv/openaddresses | scripts/at/split.py | 38 | 1591 | # -*- coding: utf-8 -*-
import __future__
import csv, sys, json, copy, datetime, time
def main(address_filename, street_filename):
    """Split the Austrian address CSV into one CSV per spatial reference
    system (SRS) and emit a matching openaddresses source JSON for each.

    address_filename: semicolon-delimited address dump; column 17 carries
        the SRS code and column 4 the street key.
    street_filename: semicolon-delimited street register mapping street
        keys (column 0) to street names (column 1).

    Side effects: writes `at-<srs>-<timestamp>.csv` and `at-<srs>.json`
    files into the working directory and prints the timestamp used.
    """
    timestamp = datetime.datetime.now().strftime('%Y%m%d')
    # Build the street-key -> street-name lookup table.
    streets = {}
    with open(street_filename) as f:
        reader = csv.reader(f, delimiter=';')
        next(reader)  # skip the header row
        for row in reader:
            streets[row[0]] = row[1].strip()
    # Fan address rows out into one writer per SRS, appending the resolved
    # street name as an extra 'STRASSE' column.
    writers = {}
    out_files = []
    with open(address_filename) as f:
        reader = csv.reader(f, delimiter=';')
        headers = next(reader) + ['STRASSE']
        for row in reader:
            srs = row[17].strip()
            if srs not in writers:
                out = open('at-{}-{}.csv'.format(srs, timestamp), 'w')
                out_files.append(out)
                writers[srs] = csv.writer(out)
                writers[srs].writerow(headers)
            writers[srs].writerow(row + [streets.get(row[4].strip(), '')])
    # Close every per-SRS CSV so the data is flushed to disk before any
    # downstream step reads it (the original handles were never closed).
    for out in out_files:
        out.close()
    with open('at_source.json', 'r') as f:
        template = json.load(f)
    # Emit one source JSON per SRS, derived from the shared template.
    for srs in writers:
        source = copy.deepcopy(template)
        source['data'] = 'http://data.openaddresses.io/cache/at-{}.zip'.format(timestamp)
        source['conform']['srs'] = 'EPSG:{}'.format(srs)
        source['conform']['file'] = 'at-{}-{}.csv'.format(srs, timestamp)
        source['attribution'] = '© Austrian address register, date data from {}'.format(datetime.datetime.now().isoformat().split('T')[0])
        with open('at-{}.json'.format(srs), 'w') as f:
            json.dump(source, f, indent=4)
    print(timestamp)
if __name__ == '__main__':
main(address_filename=sys.argv[1], street_filename=sys.argv[2]) | bsd-3-clause |
tomtor/QGIS | python/processing/__init__.py | 45 | 1256 | # -#- coding: utf-8 -#-
###########################################################################
# __init__.py
# ---------------------
# Date : November 2018
# Copyright : (C) 2018 by Nathan Woodrow
# Email : woodrow dot nathan at gmail dot com
###########################################################################
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
###########################################################################
"""
QGIS Processing Python additions.
This module contains stable API adding additional Python specific functionality
to the core QGIS c++ Processing classes.
"""
__author__ = 'Nathan Woodrow'
__date__ = 'November 2018'
__copyright__ = '(C) 2018, Nathan Woodrow'
from .algfactory import ProcessingAlgFactory
# Module-level singleton: `from processing import alg` yields the factory.
alg = ProcessingAlgFactory()
| gpl-2.0 |
davidnmurray/iris | lib/iris/tests/unit/cube/test_Cube__operators.py | 4 | 5793 | # (C) British Crown Copyright 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.cube.Cube` class operators."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import iris
import iris.tests as tests
import numpy as np
import biggus
class Test_Lazy_Maths(tests.IrisTest):
    """Check that cube arithmetic keeps the data lazy (biggus-backed)."""

    def build_lazy_cube(self, points, bounds=None, nx=10):
        """Make a 2d (latitude, longitude) cube whose data payload is lazy."""
        values = np.arange(len(points) * nx).reshape(len(points), nx)
        cube = iris.cube.Cube(biggus.NumpyArrayAdapter(values),
                              standard_name='air_temperature', units='K')
        cube.add_dim_coord(
            iris.coords.DimCoord(points, 'latitude', bounds=bounds), 0)
        cube.add_dim_coord(
            iris.coords.DimCoord(np.arange(nx), 'longitude'), 1)
        return cube

    def assert_elementwise(self, cube, other, result, np_op):
        """Assert `result` is a lazy elementwise `np_op` over `cube`/`other`."""
        self.assertIsInstance(result, biggus._Elementwise)
        self.assertEqual(result._numpy_op, np_op)
        self.assertArrayAlmostEqual(result._array1, cube.lazy_data())
        if other is not None:
            self.assertArrayAlmostEqual(result._array2, other)

    def _check(self, source, derived, other, np_op):
        # Shared assertions: the derived cube stays lazy and wraps the
        # expected elementwise operation.
        lazy_result = derived.lazy_data()
        self.assertTrue(derived.has_lazy_data())
        self.assert_elementwise(source, other, lazy_result, np_op)

    def test_lazy_biggus_add_cubes(self):
        c1 = self.build_lazy_cube([1, 2])
        self._check(c1, c1 + c1, c1.lazy_data(), np.add)

    def test_lazy_biggus_add_scalar(self):
        c1 = self.build_lazy_cube([1, 2])
        self.assertEqual(c1 + 5, 5 + c1)
        self._check(c1, c1 + 5, None, np.add)

    def test_lazy_biggus_mul_cubes(self):
        c1 = self.build_lazy_cube([1, 2])
        self._check(c1, c1 * c1, c1.lazy_data(), np.multiply)

    def test_lazy_biggus_mul_scalar(self):
        c1 = self.build_lazy_cube([1, 2])
        self.assertEqual(c1 * 5, 5 * c1)
        self._check(c1, c1 * 5, None, np.multiply)

    def test_lazy_biggus_sub_cubes(self):
        c1 = self.build_lazy_cube([1, 2])
        self._check(c1, c1 - c1, c1.lazy_data(), np.subtract)

    def test_lazy_biggus_sub_scalar(self):
        c1 = self.build_lazy_cube([1, 2])
        self._check(c1, c1 - 5, None, np.subtract)

    def test_lazy_biggus_div_cubes(self):
        c1 = self.build_lazy_cube([1, 2])
        self._check(c1, c1 / c1, c1.lazy_data(), np.divide)

    def test_lazy_biggus_div_scalar(self):
        c1 = self.build_lazy_cube([1, 2])
        self._check(c1, c1 / 5, None, np.divide)
class Test_Scalar_Cube_Lazy_Maths(tests.IrisTest):
    """Arithmetic on scalar (0-d) lazy cubes must realise to 0-d arrays."""

    def build_lazy_cube(self, value):
        """Make a 0-dimensional cube with a lazy data payload."""
        payload = biggus.NumpyArrayAdapter(np.array(value))
        return iris.cube.Cube(payload, standard_name='air_temperature',
                              units='K')

    def setUp(self):
        self.c1 = self.build_lazy_cube(3)
        self.c2 = self.build_lazy_cube(4)

    def _assert_scalar_result(self, cube):
        # Realised data must be a genuine numpy array with scalar shape.
        data = cube.data
        self.assertTrue(isinstance(data, np.ndarray))
        self.assertEqual(data.shape, ())

    def test_add_scalar(self):
        self._assert_scalar_result(self.c1 + 5)

    def test_add_cubes(self):
        self._assert_scalar_result(self.c1 + self.c2)

    def test_mul_scalar(self):
        self._assert_scalar_result(self.c1 * 5)

    def test_mul_cubes(self):
        self._assert_scalar_result(self.c1 * self.c2)

    def test_sub_scalar(self):
        self._assert_scalar_result(self.c1 - 5)

    def test_sub_cubes(self):
        self._assert_scalar_result(self.c1 - self.c2)

    def test_div_scalar(self):
        self._assert_scalar_result(self.c1 / 5)

    def test_div_cubes(self):
        self._assert_scalar_result(self.c1 / self.c2)
if __name__ == "__main__":
tests.main()
| gpl-3.0 |
rmanocha/bajar-de-peso | appengine_django/models.py | 7 | 6618 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import types
from google.appengine.ext import db
from django import VERSION
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.fields import Field
from django.db.models.options import Options
from django.db.models.loading import register_models, get_model
class ModelManager(object):
  """Minimal stand-in for the default Django model manager.

  Every attribute lookup is forwarded to the wrapped model class, so code
  written against Django's ``Model.objects`` API keeps working.
  """

  def __init__(self, owner):
    # `owner` is the model class this manager proxies for.
    self.owner = owner

  def __getattr__(self, name):
    """Delegate any unknown attribute access to the real model class."""
    return getattr(self.owner, name)
class ModelOptions(object):
  """Replacement for the default Django options class.
  This class sits at ._meta of each model. The primary information supplied by
  this class that needs to be stubbed out is the list of fields on the model.
  """
  # Django 1.1 compat
  proxy = None
  def __init__(self, cls):
    # Mirror the naming attributes Django expects to find on Model._meta.
    self.object_name = cls.__name__
    self.module_name = self.object_name.lower()
    model_module = sys.modules[cls.__module__]
    # e.g. a model defined in 'myapp.models' gets app_label 'myapp'.
    self.app_label = model_module.__name__.split('.')[-2]
    self.abstract = False
  class pk:
    """Stub the primary key to always be 'key_name'"""
    # NOTE(review): exposed as the class attribute ModelOptions.pk; only the
    # `.name` attribute appears to be needed by Django here -- confirm
    # against the Django version this targets.
    name = "key_name"
  def __str__(self):
    # Matches Django's "<app_label>.<module_name>" identification format.
    return "%s.%s" % (self.app_label, self.module_name)
  @property
  def many_to_many(self):
    """The datastore does not support many to many relationships."""
    return []
class Relation(object):
  """Minimal stand-in for a Django relation descriptor.
  Only `field_name` is provided; reference properties always point at the
  implicit 'key_name' primary key (see PropertyWrapper below).
  """
  def __init__(self, to):
    # `to` (the referenced model class) is accepted for signature
    # compatibility but not stored.
    self.field_name = "key_name"
def PropertyWrapper(prop):
  """Wrapper for db.Property to make it look like a Django model Property"""
  # Reference properties additionally gain a Django-style `rel` descriptor.
  if isinstance(prop, db.Reference):
    prop.rel = Relation(prop.reference_class)
  else:
    prop.rel = None
  prop.serialize = True
  # NOTE(termie): These are rather useless hacks to get around Django changing
  #               their approach to "fields" and breaking encapsulation a bit,
  # Django calls these when serializing instances (e.g. fixture dumping);
  # with no instance, fall back to the property's default value.
  def _get_val_from_obj(obj):
    if obj:
      return getattr(obj, prop.name)
    else:
      return prop.default_value()
  def value_to_string(obj):
    if obj:
      return str(getattr(obj, prop.name))
    else:
      return str(prop.default_value())
  prop._get_val_from_obj = _get_val_from_obj
  prop.value_to_string = value_to_string
  return prop
class PropertiedClassWithDjango(db.PropertiedClass):
  """Metaclass for the combined Django + App Engine model class.
  This metaclass inherits from db.PropertiedClass in the appengine library.
  This metaclass has two additional purposes:
  1) Register each model class created with Django (the parent class will take
     care of registering it with the appengine libraries).
  2) Add the (minimum number) of attributes and methods to make Django believe
     the class is a normal Django model.
  The resulting classes are still not generally useful as Django classes and
  are intended to be used by Django only in limited situations such as loading
  and dumping fixtures.
  """
  def __new__(cls, name, bases, attrs):
    """Creates a combined appengine and Django model.
    The resulting model will be known to both the appengine libraries and
    Django.
    """
    if name == 'BaseModel':
      # This metaclass only acts on subclasses of BaseModel.
      return super(PropertiedClassWithDjango, cls).__new__(cls, name,
                                                           bases, attrs)
    new_class = super(PropertiedClassWithDjango, cls).__new__(cls, name,
                                                              bases, attrs)
    new_class._meta = ModelOptions(new_class)
    new_class.objects = ModelManager(new_class)
    new_class._default_manager = new_class.objects
    # types.ClassType builds an old-style class (Python 2 only): each model
    # gets its own DoesNotExist exception, as Django code expects.
    new_class.DoesNotExist = types.ClassType('DoesNotExist',
                                             (ObjectDoesNotExist,), {})
    # If Django already has this model registered (e.g. the module was
    # imported twice), reuse the registered class.
    m = get_model(new_class._meta.app_label, name, False)
    if m:
      return m
    register_models(new_class._meta.app_label, new_class)
    return get_model(new_class._meta.app_label, name, False)
  def __init__(cls, name, bases, attrs):
    """Initialises the list of Django properties.
    This method takes care of wrapping the properties created by the superclass
    so that they look like Django properties and installing them into the
    ._meta object of the class so that Django can find them at the appropriate
    time.
    """
    super(PropertiedClassWithDjango, cls).__init__(name, bases, attrs)
    if name == 'BaseModel':
      # This metaclass only acts on subclasses of BaseModel.
      return
    # Expose the datastore properties as Django-style fields on ._meta.
    fields = [PropertyWrapper(p) for p in cls._properties.values()]
    cls._meta.local_fields = fields
class BaseModel(db.Model):
  """Combined appengine and Django model.
  All models used in the application should derive from this class.
  """
  __metaclass__ = PropertiedClassWithDjango
  def __eq__(self, other):
    # Two instances are equal iff they share a class and a primary key.
    if not isinstance(other, self.__class__):
      return False
    return self._get_pk_val() == other._get_pk_val()
  def __ne__(self, other):
    return not self.__eq__(other)
  def _get_pk_val(self):
    """Return the string representation of the model's key"""
    # Python 2 `unicode`; the datastore key string acts as the primary key.
    return unicode(self.key())
  def __repr__(self):
    """Create a string that can be used to construct an equivalent object.
    e.g. eval(repr(obj)) == obj
    """
    # First, creates a dictionary of property names and values. Note that
    # property values, not property objects, has to be passed in to constructor.
    def _MakeReprTuple(prop_name):
      prop = getattr(self.__class__, prop_name)
      return (prop_name, prop.get_value_for_datastore(self))
    d = dict([_MakeReprTuple(prop_name) for prop_name in self.properties()])
    return "%s(**%s)" % (self.__class__.__name__, repr(d))
class RegistrationTestModel(BaseModel):
  """Used to check registration with Django is working correctly.
  Django 0.96 only recognises models defined within an applications models
  module when get_models() is called so this definition must be here rather
  than within the associated test (tests/model_test.py).
  """
  # Intentionally empty: its mere existence exercises the metaclass
  # registration machinery above.
  pass
| gpl-2.0 |
suninsky/ReceiptOCR | Python/server/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/big5prober.py | 2931 | 1684 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
    """Charset prober for Big5-encoded (traditional Chinese) byte streams."""
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # The coding state machine validates Big5 byte sequences; the
        # distribution analyser scores character frequency for confidence.
        self._mCodingSM = CodingStateMachine(Big5SMModel)
        self._mDistributionAnalyzer = Big5DistributionAnalysis()
        self.reset()
    def get_charset_name(self):
        """Return the canonical charset name reported when this prober wins."""
        return "Big5"
simbs/edx-platform | common/djangoapps/util/tests/test_request.py | 113 | 2390 | """Tests for util.request module."""
import unittest
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.test.client import RequestFactory
from util.request import course_id_from_url, safe_get_host
class ResponseTestCase(unittest.TestCase):
    """ Tests for response-related utility functions """
    def setUp(self):
        super(ResponseTestCase, self).setUp()
        # Snapshot the Django settings mutated below so tearDown can
        # restore them and avoid leaking state between tests.
        self.old_site_name = settings.SITE_NAME
        self.old_allowed_hosts = settings.ALLOWED_HOSTS
    def tearDown(self):
        super(ResponseTestCase, self).tearDown()
        settings.SITE_NAME = self.old_site_name
        settings.ALLOWED_HOSTS = self.old_allowed_hosts
    def test_safe_get_host(self):
        """ Tests that the safe_get_host function returns the desired host """
        settings.SITE_NAME = 'siteName.com'
        factory = RequestFactory()
        request = factory.request()
        request.META['HTTP_HOST'] = 'www.userProvidedHost.com'
        # If ALLOWED_HOSTS is not set properly, safe_get_host should return SITE_NAME
        settings.ALLOWED_HOSTS = None
        self.assertEqual(safe_get_host(request), "siteName.com")
        settings.ALLOWED_HOSTS = ["*"]
        self.assertEqual(safe_get_host(request), "siteName.com")
        settings.ALLOWED_HOSTS = ["foo.com", "*"]
        self.assertEqual(safe_get_host(request), "siteName.com")
        # If ALLOWED_HOSTS is set properly, and the host is valid, we just return the user-provided host
        settings.ALLOWED_HOSTS = [request.META['HTTP_HOST']]
        self.assertEqual(safe_get_host(request), request.META['HTTP_HOST'])
        # If ALLOWED_HOSTS is set properly but the host is invalid, we should get a SuspiciousOperation
        settings.ALLOWED_HOSTS = ["the_valid_website.com"]
        with self.assertRaises(SuspiciousOperation):
            safe_get_host(request)
    def test_course_id_from_url(self):
        """ Test course_id_from_url(). """
        # Non-course and malformed URLs must yield None.
        self.assertIsNone(course_id_from_url('/login'))
        self.assertIsNone(course_id_from_url('/course/edX/maths/2020'))
        self.assertIsNone(course_id_from_url('/courses/edX/maths/'))
        # A well-formed /courses/<org>/<course>/<run> URL parses into parts.
        course_id = course_id_from_url('/courses/edX/maths/2020')
        self.assertEqual(course_id.org, 'edX')
        self.assertEqual(course_id.course, 'maths')
        self.assertEqual(course_id.run, '2020')
home-assistant/home-assistant | homeassistant/components/blink/sensor.py | 5 | 2650 | """Support for Blink system camera sensors."""
import logging
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import (
DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_TEMPERATURE,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
TEMP_FAHRENHEIT,
)
from .const import DOMAIN, TYPE_TEMPERATURE, TYPE_WIFI_STRENGTH
_LOGGER = logging.getLogger(__name__)
# Map of sensor type key -> [display name, unit, device class].
SENSORS = {
    TYPE_TEMPERATURE: ["Temperature", TEMP_FAHRENHEIT, DEVICE_CLASS_TEMPERATURE],
    TYPE_WIFI_STRENGTH: [
        "Wifi Signal",
        SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
        DEVICE_CLASS_SIGNAL_STRENGTH,
    ],
}
async def async_setup_entry(hass, config, async_add_entities):
    """Create one Blink sensor entity per (camera, sensor type) pair."""
    data = hass.data[DOMAIN][config.entry_id]
    entities = [
        BlinkSensor(data, camera, sensor_type)
        for camera in data.cameras
        for sensor_type in SENSORS
    ]
    async_add_entities(entities)
class BlinkSensor(SensorEntity):
    """Sensor entity exposing a single attribute of one Blink camera."""

    def __init__(self, data, camera, sensor_type):
        """Build the sensor from the shared Blink data object and camera key."""
        sensor_name, unit, device_class = SENSORS[sensor_type]
        self._name = f"{DOMAIN} {camera} {sensor_name}"
        self._camera_name = sensor_name
        self._type = sensor_type
        self._device_class = device_class
        self.data = data
        self._camera = data.cameras[camera]
        self._state = None
        self._unit_of_measurement = unit
        self._unique_id = f"{self._camera.serial}-{self._type}"
        # Temperature readings live under a calibrated attribute key.
        self._sensor_key = (
            "temperature_calibrated"
            if sensor_type == "temperature"
            else sensor_type
        )

    @property
    def name(self):
        """Return the name of the camera."""
        return self._name

    @property
    def unique_id(self):
        """Return the unique id for the camera sensor."""
        return self._unique_id

    @property
    def state(self):
        """Return the camera's current state."""
        return self._state

    @property
    def device_class(self):
        """Return the device's class."""
        return self._device_class

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._unit_of_measurement

    def update(self):
        """Refresh shared Blink data and read this sensor's attribute."""
        self.data.refresh()
        attributes = self._camera.attributes
        if self._sensor_key in attributes:
            self._state = attributes[self._sensor_key]
        else:
            self._state = None
            _LOGGER.error(
                "%s not a valid camera attribute. Did the API change?", self._sensor_key
            )
| apache-2.0 |
jandebleser/django-wiki | testproject/testproject/wsgi.py | 3 | 1401 | """
WSGI config for testproject project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
from __future__ import absolute_import, unicode_literals
import os
import sys
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
# Put this settings package and its parent directory on sys.path so that
# 'testproject.*' and its sibling apps can be imported by the WSGI server.
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
PROJECT_PARENT = os.path.abspath(os.path.dirname(PROJECT_PATH))
for _extra_path in (PROJECT_PATH, PROJECT_PARENT):
    sys.path.append(_extra_path)

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testproject.settings")

application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| gpl-3.0 |
bencharb/AutobahnPython | examples/asyncio/wamp/rpc/decorators/backend.py | 2 | 2883 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from os import environ
import datetime
try:
import asyncio
except ImportError:
# Trollius >= 0.3 was renamed
import trollius as asyncio
from autobahn import wamp
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
    """
    An application component whose RPC endpoints are declared with the
    "@wamp.register" decorator and registered in bulk on join.
    """

    @asyncio.coroutine
    def onJoin(self, details):
        # Register every decorated method on this instance as an RPC
        # endpoint, then report each registration outcome.
        outcomes = yield from self.register(self)
        for outcome in outcomes:
            if not isinstance(outcome, wamp.protocol.Registration):
                # outcome is a Failure instance
                print("Failed to register procedure: {}".format(outcome))
            else:
                # outcome is a Registration instance
                print("Ok, registered procedure with registration ID {}".format(outcome.id))

    @wamp.register('com.mathservice.add2')
    def add2(self, x, y):
        """Return the sum of the two arguments."""
        return x + y

    @wamp.register('com.mathservice.mul2')
    def mul2(self, x, y):
        """Return the product of the two arguments."""
        return x * y

    @wamp.register('com.mathservice.div2')
    def square(self, x, y):
        # NOTE: despite its name this endpoint performs division (it is
        # registered under 'com.mathservice.div2'); a zero/falsy divisor
        # yields 0 instead of raising.
        return float(x) / float(y) if y else 0
if __name__ == '__main__':
    # Router URL can be overridden via the environment for demos.
    router_url = environ.get("AUTOBAHN_DEMO_ROUTER", "ws://127.0.0.1:8080/ws")
    app_runner = ApplicationRunner(
        router_url,
        u"crossbardemo",
        debug_wamp=False,  # optional; log many WAMP details
        debug=False,  # optional; log even more details
    )
    app_runner.run(Component)
| mit |
shubhdev/edx-platform | common/test/acceptance/pages/studio/users.py | 9 | 8778 | """
Page classes to test either the Course Team page or the Library Team page.
"""
from bok_choy.promise import EmptyPromise
from bok_choy.page_object import PageObject
from ...tests.helpers import disable_animations
from .course_page import CoursePage
from . import BASE_URL
def wait_for_ajax_or_reload(browser):
    """
    Block until all jQuery AJAX requests have finished OR the page reloads.

    A plain wait_for_ajax() occasionally fails when the page reloads
    mid-wait, raising "WebDriverException: Message: u'jQuery is not
    defined'" — so this variant also treats an absent jQuery as done.
    """
    def _ajax_complete():
        """True once jQuery (if loaded at all) has no AJAX calls in flight."""
        return browser.execute_script("return typeof(jQuery) == 'undefined' || jQuery.active == 0")

    EmptyPromise(_ajax_complete, "Finished waiting for ajax requests.").fulfill()
class UsersPageMixin(PageObject):
    """
    Common functionality for the Course Team and Library Team pages.

    Subclasses must provide a ``url`` property; everything else (listing
    users, adding and deleting team members) is shared.
    """

    # Single source of truth for the "add user" form's email input selector.
    # The form-related methods below reuse it instead of repeating the literal.
    new_user_form_selector = '.form-create.create-user .user-email-input'

    @property
    def url(self):
        """
        URL to this page - override in subclass.

        Declared as a property (matching ``LibraryUsersPage`` and the
        ``PageObject`` contract) so that accessing ``self.url`` on an
        incomplete subclass raises instead of silently returning a bound
        method.
        """
        raise NotImplementedError

    def is_browser_on_page(self):
        """
        Returns True iff the browser has loaded the page.
        """
        return self.q(css='body.view-team').present

    @property
    def users(self):
        """
        Return a list of UserWrapper objects, one per user listed on this page.
        """
        return self.q(css='.user-list .user-item').map(
            lambda el: UserWrapper(self.browser, el.get_attribute('data-email'))
        ).results

    @property
    def usernames(self):
        """
        Returns a list of user names for users listed on this page.
        """
        return [user.name for user in self.users]

    @property
    def has_add_button(self):
        """
        Is the "New Team Member" button present?
        """
        return self.q(css='.create-user-button').present

    def click_add_button(self):
        """
        Click on the "New Team Member" button and wait for the form to show.
        """
        self.q(css='.create-user-button').first.click()
        self.wait_for(lambda: self.new_user_form_visible, "Add user form is visible")

    @property
    def new_user_form_visible(self):
        """ Is the new user form visible? """
        return self.q(css=self.new_user_form_selector).visible

    def set_new_user_email(self, email):
        """ Set the value of the "New User Email Address" field. """
        self.q(css=self.new_user_form_selector).fill(email)

    def click_submit_new_user_form(self):
        """ Submit the "New User" form. """
        self.q(css='.form-create.create-user .action-primary').click()
        wait_for_ajax_or_reload(self.browser)

    def get_user(self, email):
        """
        Return the UserWrapper for the (single) listed user with `email`.
        """
        target_users = [user for user in self.users if user.email == email]
        assert len(target_users) == 1
        return target_users[0]

    def add_user_to_course(self, email):
        """ Add the user with `email` to this course/library team. """
        self.wait_for_element_visibility('.create-user-button', "Add team member button is available")
        self.click_add_button()
        self.set_new_user_email(email)
        self.click_submit_new_user_form()

    def delete_user_from_course(self, email):
        """ Delete the user with `email` from this course/library team. """
        target_user = self.get_user(email)
        target_user.click_delete()

    def modal_dialog_visible(self, dialog_type):
        """ Checks if a modal dialog of the specified class is displayed. """
        return self.q(css='.prompt.{dialog_type}'.format(dialog_type=dialog_type)).visible

    def modal_dialog_text(self, dialog_type):
        """ Gets the message text of the modal dialog of the specified class. """
        return self.q(css='.prompt.{dialog_type} .message'.format(dialog_type=dialog_type)).text[0]

    def wait_until_ready(self):
        """
        When the page first loads, there is a loading indicator and most
        functionality is not yet available. This waits for that loading to
        finish.

        Always call this before using the page. It also disables animations
        for improved test reliability.
        """
        self.wait_for_element_invisibility(
            '.ui-loading',
            'Wait for the page to complete its initial loading and rendering via Backbone'
        )
        disable_animations(self)
class LibraryUsersPage(UsersPageMixin):
    """
    The Library Team management page in Studio.
    """
    def __init__(self, browser, locator):
        super(LibraryUsersPage, self).__init__(browser)
        self.locator = locator

    @property
    def url(self):
        """
        URL of the "User Access" page for the given library.
        """
        return "%s/library/%s/team/" % (BASE_URL, unicode(self.locator))
class CourseTeamPage(CoursePage, UsersPageMixin):
    """
    Course Team page in Studio.
    """
    # Path suffix appended to the course URL by CoursePage to reach this page.
    url_path = "course_team"
class UserWrapper(PageObject):
    """
    A PageObject representing a wrapper around a user listed on the course/library team page.
    """
    # This wrapper is never navigated to directly, so it has no URL.
    url = None

    # NOTE(review): these selectors target the component editor tabs, not the
    # team page, and nothing in this class uses them — looks like copy-paste;
    # confirm before relying on it.
    COMPONENT_BUTTONS = {
        'basic_tab': '.editor-tabs li.inner_tab_wrap:nth-child(1) > a',
        'advanced_tab': '.editor-tabs li.inner_tab_wrap:nth-child(2) > a',
        'save_settings': '.action-save',
    }

    def __init__(self, browser, email):
        # Each wrapper is scoped to the single list entry whose data-email
        # attribute matches `email`.
        super(UserWrapper, self).__init__(browser)
        self.email = email
        self.selector = '.user-list .user-item[data-email="{}"]'.format(self.email)

    def is_browser_on_page(self):
        """
        Sanity check that our wrapper element is on the page.
        """
        return self.q(css=self.selector).present

    def _bounded_selector(self, selector):
        """
        Return `selector`, but limited to this particular user entry's context.
        """
        return '{} {}'.format(self.selector, selector)

    @property
    def name(self):
        """ Get this user's username, as displayed. """
        return self.q(css=self._bounded_selector('.user-username')).text[0]

    @property
    def role_label(self):
        """ Get this user's role, as displayed. """
        return self.q(css=self._bounded_selector('.flag-role .value')).text[0]

    @property
    def is_current_user(self):
        """ Does the UI indicate that this is the current user? """
        return self.q(css=self._bounded_selector('.flag-role .msg-you')).present

    @property
    def can_promote(self):
        """ Can this user be promoted to a more powerful role? """
        return self.q(css=self._bounded_selector('.add-admin-role')).present

    @property
    def promote_button_text(self):
        """ What does the promote user button say? """
        return self.q(css=self._bounded_selector('.add-admin-role')).text[0]

    def click_promote(self):
        """ Click on the button to promote this user to the more powerful role """
        self.q(css=self._bounded_selector('.add-admin-role')).click()
        wait_for_ajax_or_reload(self.browser)

    @property
    def can_demote(self):
        """ Can this user be demoted to a less powerful role? """
        return self.q(css=self._bounded_selector('.remove-admin-role')).present

    @property
    def demote_button_text(self):
        """ What does the demote user button say? """
        return self.q(css=self._bounded_selector('.remove-admin-role')).text[0]

    def click_demote(self):
        """ Click on the button to demote this user to the less powerful role """
        self.q(css=self._bounded_selector('.remove-admin-role')).click()
        wait_for_ajax_or_reload(self.browser)

    @property
    def can_delete(self):
        """ Can this user be deleted? """
        # Only enabled delete buttons count; a disabled button means the
        # current user lacks permission to remove this entry.
        return self.q(css=self._bounded_selector('.action-delete:not(.is-disabled) .remove-user')).present

    def click_delete(self):
        """ Click the button to delete this user. """
        disable_animations(self)
        self.q(css=self._bounded_selector('.remove-user')).click()
        # We can't use confirm_prompt because its wait_for_ajax is flaky when the page is expected to reload.
        self.wait_for_element_visibility('.prompt', 'Prompt is visible')
        self.wait_for_element_visibility('.prompt .action-primary', 'Confirmation button is visible')
        self.q(css='.prompt .action-primary').click()
        wait_for_ajax_or_reload(self.browser)

    @property
    def has_no_change_warning(self):
        """ Does this have a warning in place of the promote/demote buttons? """
        return self.q(css=self._bounded_selector('.notoggleforyou')).present

    @property
    def no_change_warning_text(self):
        """ Text of the warning seen in place of the promote/demote buttons. """
        return self.q(css=self._bounded_selector('.notoggleforyou')).text[0]
| agpl-3.0 |
diogocs1/comps | web/addons/mrp/wizard/__init__.py | 374 | 1199 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import mrp_product_produce
import mrp_price
import mrp_workcenter_load
import change_production_qty
import stock_move
#import mrp_change_standard_price
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| apache-2.0 |
PatrickChrist/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Build a noisy circle: one feature X maps to two targets (pi*sin, pi*cos),
# with noise added to every fifth sample.
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))

# Dense grid of query points for prediction.
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]

# Fit one regressor per depth and plot its predictions; deeper trees fit
# ever finer detail and eventually the noise (overfitting).
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
for depth, colour in ((2, "g"), (5, "r"), (8, "b")):
    model = DecisionTreeRegressor(max_depth=depth)
    model.fit(X, y)
    prediction = model.predict(X_test)
    plt.scatter(prediction[:, 0], prediction[:, 1], c=colour,
                label="max_depth=%d" % depth)

plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
beiko-lab/gengis | bin/Lib/site-packages/scipy/spatial/tests/test_distance.py | 1 | 74493 | #! /usr/bin/env python
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import os.path
from scipy.lib.six.moves import xrange
import numpy as np
from numpy.linalg import norm
from numpy.testing import (verbose, TestCase, run_module_suite,
assert_raises, assert_array_equal, assert_equal, assert_almost_equal,
assert_allclose)
from scipy.lib.six import u
from scipy.spatial.distance import (squareform, pdist, cdist, matching,
jaccard, dice, sokalsneath, rogerstanimoto, russellrao, yule,
num_obs_y, num_obs_dm, is_valid_dm, is_valid_y, minkowski, wminkowski,
euclidean, sqeuclidean, cosine, correlation, mahalanobis,
canberra, braycurtis, sokalmichener, _validate_vector)
_filenames = ["iris.txt",
"cdist-X1.txt",
"cdist-X2.txt",
"pdist-hamming-ml.txt",
"pdist-boolean-inp.txt",
"pdist-jaccard-ml.txt",
"pdist-cityblock-ml-iris.txt",
"pdist-minkowski-3.2-ml-iris.txt",
"pdist-cityblock-ml.txt",
"pdist-correlation-ml-iris.txt",
"pdist-minkowski-5.8-ml-iris.txt",
"pdist-correlation-ml.txt",
"pdist-minkowski-3.2-ml.txt",
"pdist-cosine-ml-iris.txt",
"pdist-seuclidean-ml-iris.txt",
"pdist-cosine-ml.txt",
"pdist-seuclidean-ml.txt",
"pdist-double-inp.txt",
"pdist-spearman-ml.txt",
"pdist-euclidean-ml.txt",
"pdist-euclidean-ml-iris.txt",
"pdist-chebychev-ml.txt",
"pdist-chebychev-ml-iris.txt",
"random-bool-data.txt"]
_tdist = np.array([[0, 662, 877, 255, 412, 996],
[662, 0, 295, 468, 268, 400],
[877, 295, 0, 754, 564, 138],
[255, 468, 754, 0, 219, 869],
[412, 268, 564, 219, 0, 669],
[996, 400, 138, 869, 669, 0]], dtype='double')
_ytdist = squareform(_tdist)
# A hashmap of expected output arrays for the tests. These arrays
# come from a list of text files, which are read prior to testing.
eo = {}
def load_testing_files():
    """Load the test data files for the scipy.spatial.distance tests.

    Populates the module-level ``eo`` mapping with one array per file in
    ``_filenames``, keyed by the file name with the ".txt" suffix and
    "-ml" infix stripped.
    """
    for fn in _filenames:
        name = fn.replace(".txt", "").replace("-ml", "")
        fqfn = os.path.join(os.path.dirname(__file__), 'data', fn)
        # 'with' guarantees the handle is closed even if np.loadtxt raises,
        # unlike the previous open()/close() pair.
        with open(fqfn) as fp:
            eo[name] = np.loadtxt(fp)
    # The boolean input matrix is stored as 0/1 floats; convert it once here.
    eo['pdist-boolean-inp'] = np.bool_(eo['pdist-boolean-inp'])

load_testing_files()
class TestCdist(TestCase):
    """
    Test suite for the cdist function.

    Each test compares the C implementation of a metric against its pure
    Python reference implementation (the same metric name prefixed with
    'test_') on the stored random input matrices. The shared boilerplate
    lives in _check_random so each metric test is a one-liner.
    """

    def _check_random(self, metric, eps=1e-07, as_bool=False, **kwargs):
        """
        Assert that cdist(X1, X2, metric) agrees with the reference
        implementation cdist(X1, X2, 'test_' + metric) to within `eps`.

        If `as_bool` is True the stored float inputs are thresholded to
        boolean matrices first. Extra keyword arguments (e.g. `p`, `w`)
        are forwarded to both cdist calls.
        """
        X1 = eo['cdist-X1']
        X2 = eo['cdist-X2']
        if as_bool:
            X1 = X1 < 0.5
            X2 = X2 < 0.5
        Y1 = cdist(X1, X2, metric, **kwargs)
        Y2 = cdist(X1, X2, 'test_' + metric, **kwargs)
        if verbose > 2:
            print((Y1 - Y2).max())
        self.assertTrue(within_tol(Y1, Y2, eps))

    def test_cdist_euclidean_random(self):
        "Tests cdist(X, 'euclidean') on random data."
        self._check_random('euclidean')

    def test_cdist_euclidean_random_unicode(self):
        "Tests cdist(X, u'euclidean') using unicode metric string"
        self._check_random(u('euclidean'))

    def test_cdist_sqeuclidean_random(self):
        "Tests cdist(X, 'sqeuclidean') on random data."
        self._check_random('sqeuclidean')

    def test_cdist_cityblock_random(self):
        "Tests cdist(X, 'cityblock') on random data."
        self._check_random('cityblock')

    def test_cdist_hamming_double_random(self):
        "Tests cdist(X, 'hamming') on random data."
        self._check_random('hamming')

    def test_cdist_hamming_bool_random(self):
        "Tests cdist(X, 'hamming') on random boolean data."
        self._check_random('hamming', as_bool=True)

    def test_cdist_jaccard_double_random(self):
        "Tests cdist(X, 'jaccard') on random data."
        self._check_random('jaccard')

    def test_cdist_jaccard_bool_random(self):
        "Tests cdist(X, 'jaccard') on random boolean data."
        self._check_random('jaccard', as_bool=True)

    def test_cdist_chebychev_random(self):
        "Tests cdist(X, 'chebychev') on random data."
        self._check_random('chebychev')

    def test_cdist_minkowski_random_p3d8(self):
        "Tests cdist(X, 'minkowski') on random data. (p=3.8)"
        self._check_random('minkowski', p=3.8)

    def test_cdist_minkowski_random_p4d6(self):
        "Tests cdist(X, 'minkowski') on random data. (p=4.6)"
        self._check_random('minkowski', p=4.6)

    def test_cdist_minkowski_random_p1d23(self):
        "Tests cdist(X, 'minkowski') on random data. (p=1.23)"
        self._check_random('minkowski', p=1.23)

    def test_cdist_wminkowski_random_p3d8(self):
        "Tests cdist(X, 'wminkowski') on random data. (p=3.8)"
        w = 1.0 / eo['cdist-X1'].std(axis=0)
        self._check_random('wminkowski', p=3.8, w=w)

    def test_cdist_wminkowski_random_p4d6(self):
        "Tests cdist(X, 'wminkowski') on random data. (p=4.6)"
        w = 1.0 / eo['cdist-X1'].std(axis=0)
        self._check_random('wminkowski', p=4.6, w=w)

    def test_cdist_wminkowski_random_p1d23(self):
        "Tests cdist(X, 'wminkowski') on random data. (p=1.23)"
        w = 1.0 / eo['cdist-X1'].std(axis=0)
        self._check_random('wminkowski', p=1.23, w=w)

    def test_cdist_seuclidean_random(self):
        "Tests cdist(X, 'seuclidean') on random data."
        self._check_random('seuclidean')

    def test_cdist_cosine_random(self):
        "Tests cdist(X, 'cosine') on random data."
        self._check_random('cosine')

    def test_cdist_correlation_random(self):
        "Tests cdist(X, 'correlation') on random data."
        self._check_random('correlation')

    def test_cdist_mahalanobis_random(self):
        "Tests cdist(X, 'mahalanobis') on random data."
        self._check_random('mahalanobis')

    def test_cdist_canberra_random(self):
        "Tests cdist(X, 'canberra') on random data."
        self._check_random('canberra', as_bool=True)

    def test_cdist_braycurtis_random(self):
        "Tests cdist(X, 'braycurtis') on random data."
        self._check_random('braycurtis', as_bool=True)

    def test_cdist_yule_random(self):
        "Tests cdist(X, 'yule') on random data."
        self._check_random('yule', as_bool=True)

    def test_cdist_matching_random(self):
        "Tests cdist(X, 'matching') on random data."
        self._check_random('matching', as_bool=True)

    def test_cdist_kulsinski_random(self):
        "Tests cdist(X, 'kulsinski') on random data."
        self._check_random('kulsinski', as_bool=True)

    def test_cdist_dice_random(self):
        "Tests cdist(X, 'dice') on random data."
        self._check_random('dice', as_bool=True)

    def test_cdist_rogerstanimoto_random(self):
        "Tests cdist(X, 'rogerstanimoto') on random data."
        self._check_random('rogerstanimoto', as_bool=True)

    def test_cdist_russellrao_random(self):
        "Tests cdist(X, 'russellrao') on random data."
        self._check_random('russellrao', as_bool=True)

    def test_cdist_sokalmichener_random(self):
        "Tests cdist(X, 'sokalmichener') on random data."
        self._check_random('sokalmichener', as_bool=True)

    def test_cdist_sokalsneath_random(self):
        "Tests cdist(X, 'sokalsneath') on random data."
        self._check_random('sokalsneath', as_bool=True)
class TestPdist(TestCase):
"""
Test suite for the pdist function.
"""
################### pdist: euclidean
def test_pdist_euclidean_random(self):
"Tests pdist(X, 'euclidean') on random data."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test1 = pdist(X, 'euclidean')
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_euclidean_random_u(self):
"Tests pdist(X, 'euclidean') with unicode metric string"
eps = 1e-07
# Get the data: the input matrix and the right output.
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test1 = pdist(X, u('euclidean'))
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_euclidean_random_float32(self):
"Tests pdist(X, 'euclidean') on random data (float32)."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-euclidean']
Y_test1 = pdist(X, 'euclidean')
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_euclidean_random_nonC(self):
"Tests pdist(X, 'test_euclidean') [the non-C implementation] on random data."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test2 = pdist(X, 'test_euclidean')
self.assertTrue(within_tol(Y_test2, Y_right, eps))
def test_pdist_euclidean_iris_double(self):
"Tests pdist(X, 'euclidean') on the Iris data set."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = eo['pdist-euclidean-iris']
Y_test1 = pdist(X, 'euclidean')
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_euclidean_iris_float32(self):
"Tests pdist(X, 'euclidean') on the Iris data set. (float32)"
eps = 1e-06
# Get the data: the input matrix and the right output.
X = np.float32(eo['iris'])
Y_right = eo['pdist-euclidean-iris']
Y_test1 = pdist(X, 'euclidean')
if verbose > 2:
print(np.abs(Y_right - Y_test1).max())
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_euclidean_iris_nonC(self):
"Tests pdist(X, 'test_euclidean') [the non-C implementation] on the Iris data set."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = eo['pdist-euclidean-iris']
Y_test2 = pdist(X, 'test_euclidean')
self.assertTrue(within_tol(Y_test2, Y_right, eps))
################### pdist: seuclidean
def test_pdist_seuclidean_random(self):
"Tests pdist(X, 'seuclidean') on random data."
eps = 1e-05
# Get the data: the input matrix and the right output.
X = eo['pdist-double-inp']
Y_right = eo['pdist-seuclidean']
Y_test1 = pdist(X, 'seuclidean')
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_seuclidean_random_float32(self):
"Tests pdist(X, 'seuclidean') on random data (float32)."
eps = 1e-05
# Get the data: the input matrix and the right output.
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-seuclidean']
Y_test1 = pdist(X, 'seuclidean')
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_seuclidean_random_nonC(self):
"Tests pdist(X, 'test_sqeuclidean') [the non-C implementation] on random data."
eps = 1e-05
# Get the data: the input matrix and the right output.
X = eo['pdist-double-inp']
Y_right = eo['pdist-seuclidean']
Y_test2 = pdist(X, 'test_sqeuclidean')
self.assertTrue(within_tol(Y_test2, Y_right, eps))
def test_pdist_seuclidean_iris(self):
"Tests pdist(X, 'seuclidean') on the Iris data set."
eps = 1e-05
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = eo['pdist-seuclidean-iris']
Y_test1 = pdist(X, 'seuclidean')
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_seuclidean_iris_float32(self):
"Tests pdist(X, 'seuclidean') on the Iris data set (float32)."
eps = 1e-05
# Get the data: the input matrix and the right output.
X = np.float32(eo['iris'])
Y_right = eo['pdist-seuclidean-iris']
Y_test1 = pdist(X, 'seuclidean')
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_seuclidean_iris_nonC(self):
"Tests pdist(X, 'test_seuclidean') [the non-C implementation] on the Iris data set."
eps = 1e-05
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = eo['pdist-seuclidean-iris']
Y_test2 = pdist(X, 'test_sqeuclidean')
self.assertTrue(within_tol(Y_test2, Y_right, eps))
################### pdist: cosine
def test_pdist_cosine_random(self):
"Tests pdist(X, 'cosine') on random data."
eps = 1e-08
# Get the data: the input matrix and the right output.
X = eo['pdist-double-inp']
Y_right = eo['pdist-cosine']
Y_test1 = pdist(X, 'cosine')
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_cosine_random_float32(self):
"Tests pdist(X, 'cosine') on random data. (float32)"
eps = 1e-08
# Get the data: the input matrix and the right output.
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-cosine']
Y_test1 = pdist(X, 'cosine')
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_cosine_random_nonC(self):
"Tests pdist(X, 'test_cosine') [the non-C implementation] on random data."
eps = 1e-08
# Get the data: the input matrix and the right output.
X = eo['pdist-double-inp']
Y_right = eo['pdist-cosine']
Y_test2 = pdist(X, 'test_cosine')
self.assertTrue(within_tol(Y_test2, Y_right, eps))
def test_pdist_cosine_iris(self):
"Tests pdist(X, 'cosine') on the Iris data set."
eps = 1e-08
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = eo['pdist-cosine-iris']
Y_test1 = pdist(X, 'cosine')
self.assertTrue(within_tol(Y_test1, Y_right, eps))
#print "cosine-iris", np.abs(Y_test1 - Y_right).max()
def test_pdist_cosine_iris_float32(self):
"Tests pdist(X, 'cosine') on the Iris data set."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = np.float32(eo['iris'])
Y_right = eo['pdist-cosine-iris']
Y_test1 = pdist(X, 'cosine')
if verbose > 2:
print(np.abs(Y_test1 - Y_right).max())
self.assertTrue(within_tol(Y_test1, Y_right, eps))
#print "cosine-iris", np.abs(Y_test1 - Y_right).max()
def test_pdist_cosine_iris_nonC(self):
"Tests pdist(X, 'test_cosine') [the non-C implementation] on the Iris data set."
eps = 1e-08
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = eo['pdist-cosine-iris']
Y_test2 = pdist(X, 'test_cosine')
self.assertTrue(within_tol(Y_test2, Y_right, eps))
################### pdist: cityblock
def test_pdist_cityblock_random(self):
"Tests pdist(X, 'cityblock') on random data."
eps = 1e-06
# Get the data: the input matrix and the right output.
X = eo['pdist-double-inp']
Y_right = eo['pdist-cityblock']
Y_test1 = pdist(X, 'cityblock')
#print "cityblock", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_cityblock_random_float32(self):
"Tests pdist(X, 'cityblock') on random data. (float32)"
eps = 1e-06
# Get the data: the input matrix and the right output.
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-cityblock']
Y_test1 = pdist(X, 'cityblock')
#print "cityblock", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_cityblock_random_nonC(self):
"Tests pdist(X, 'test_cityblock') [the non-C implementation] on random data."
eps = 1e-06
# Get the data: the input matrix and the right output.
X = eo['pdist-double-inp']
Y_right = eo['pdist-cityblock']
Y_test2 = pdist(X, 'test_cityblock')
self.assertTrue(within_tol(Y_test2, Y_right, eps))
def test_pdist_cityblock_iris(self):
"Tests pdist(X, 'cityblock') on the Iris data set."
eps = 1e-14
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = eo['pdist-cityblock-iris']
Y_test1 = pdist(X, 'cityblock')
self.assertTrue(within_tol(Y_test1, Y_right, eps))
#print "cityblock-iris", np.abs(Y_test1 - Y_right).max()
def test_pdist_cityblock_iris_float32(self):
"Tests pdist(X, 'cityblock') on the Iris data set. (float32)"
eps = 1e-06
# Get the data: the input matrix and the right output.
X = np.float32(eo['iris'])
Y_right = eo['pdist-cityblock-iris']
Y_test1 = pdist(X, 'cityblock')
if verbose > 2:
print("cityblock-iris-float32", np.abs(Y_test1 - Y_right).max())
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_cityblock_iris_nonC(self):
"Tests pdist(X, 'test_cityblock') [the non-C implementation] on the Iris data set."
eps = 1e-14
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = eo['pdist-cityblock-iris']
Y_test2 = pdist(X, 'test_cityblock')
self.assertTrue(within_tol(Y_test2, Y_right, eps))
################### pdist: correlation
def test_pdist_correlation_random(self):
"Tests pdist(X, 'correlation') on random data."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = eo['pdist-double-inp']
Y_right = eo['pdist-correlation']
Y_test1 = pdist(X, 'correlation')
#print "correlation", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_correlation_random_float32(self):
"Tests pdist(X, 'correlation') on random data. (float32)"
eps = 1e-07
# Get the data: the input matrix and the right output.
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-correlation']
Y_test1 = pdist(X, 'correlation')
#print "correlation", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_correlation_random_nonC(self):
"Tests pdist(X, 'test_correlation') [the non-C implementation] on random data."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = eo['pdist-double-inp']
Y_right = eo['pdist-correlation']
Y_test2 = pdist(X, 'test_correlation')
self.assertTrue(within_tol(Y_test2, Y_right, eps))
def test_pdist_correlation_iris(self):
"Tests pdist(X, 'correlation') on the Iris data set."
eps = 1e-08
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = eo['pdist-correlation-iris']
Y_test1 = pdist(X, 'correlation')
#print "correlation-iris", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_correlation_iris_float32(self):
"Tests pdist(X, 'correlation') on the Iris data set. (float32)"
eps = 1e-07
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = np.float32(eo['pdist-correlation-iris'])
Y_test1 = pdist(X, 'correlation')
if verbose > 2:
print("correlation-iris", np.abs(Y_test1 - Y_right).max())
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_correlation_iris_nonC(self):
"Tests pdist(X, 'test_correlation') [the non-C implementation] on the Iris data set."
eps = 1e-08
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = eo['pdist-correlation-iris']
Y_test2 = pdist(X, 'test_correlation')
#print "test-correlation-iris", np.abs(Y_test2 - Y_right).max()
self.assertTrue(within_tol(Y_test2, Y_right, eps))
################# minkowski
def test_pdist_minkowski_random(self):
"Tests pdist(X, 'minkowski') on random data."
eps = 1e-05
# Get the data: the input matrix and the right output.
X = eo['pdist-double-inp']
Y_right = eo['pdist-minkowski-3.2']
Y_test1 = pdist(X, 'minkowski', 3.2)
#print "minkowski", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_minkowski_random_float32(self):
"Tests pdist(X, 'minkowski') on random data. (float32)"
eps = 1e-05
# Get the data: the input matrix and the right output.
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-minkowski-3.2']
Y_test1 = pdist(X, 'minkowski', 3.2)
#print "minkowski", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_minkowski_random_nonC(self):
"Tests pdist(X, 'test_minkowski') [the non-C implementation] on random data."
eps = 1e-05
# Get the data: the input matrix and the right output.
X = eo['pdist-double-inp']
Y_right = eo['pdist-minkowski-3.2']
Y_test2 = pdist(X, 'test_minkowski', 3.2)
self.assertTrue(within_tol(Y_test2, Y_right, eps))
def test_pdist_minkowski_3_2_iris(self):
"Tests pdist(X, 'minkowski') on iris data."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test1 = pdist(X, 'minkowski', 3.2)
#print "minkowski-iris-3.2", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_minkowski_3_2_iris_float32(self):
"Tests pdist(X, 'minkowski') on iris data. (float32)"
eps = 1e-06
# Get the data: the input matrix and the right output.
X = np.float32(eo['iris'])
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test1 = pdist(X, 'minkowski', 3.2)
#print "minkowski-iris-3.2", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_minkowski_3_2_iris_nonC(self):
"Tests pdist(X, 'test_minkowski') [the non-C implementation] on iris data."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test2 = pdist(X, 'test_minkowski', 3.2)
self.assertTrue(within_tol(Y_test2, Y_right, eps))
def test_pdist_minkowski_5_8_iris(self):
"Tests pdist(X, 'minkowski') on iris data."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test1 = pdist(X, 'minkowski', 5.8)
#print "minkowski-iris-5.8", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_minkowski_5_8_iris_float32(self):
"Tests pdist(X, 'minkowski') on iris data. (float32)"
eps = 1e-06
# Get the data: the input matrix and the right output.
X = np.float32(eo['iris'])
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test1 = pdist(X, 'minkowski', 5.8)
if verbose > 2:
print("minkowski-iris-5.8", np.abs(Y_test1 - Y_right).max())
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_minkowski_5_8_iris_nonC(self):
"Tests pdist(X, 'test_minkowski') [the non-C implementation] on iris data."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test2 = pdist(X, 'test_minkowski', 5.8)
self.assertTrue(within_tol(Y_test2, Y_right, eps))
################# wminkowski
def test_pdist_wminkowski(self):
x = np.array([[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 1.0, 1.0]])
p2_expected = [1.0, 1.0, np.sqrt(3),
np.sqrt(2), np.sqrt(2),
np.sqrt(2)]
p1_expected = [0.5, 1.0, 3.5,
1.5, 3.0,
2.5]
dist = pdist(x, metric=wminkowski, w=[1.0, 1.0, 1.0])
assert_allclose(dist, p2_expected, rtol=1e-14)
dist = pdist(x, metric=wminkowski, w=[0.5, 1.0, 2.0], p=1)
assert_allclose(dist, p1_expected, rtol=1e-14)
dist = pdist(x, metric='wminkowski', w=[1.0, 1.0, 1.0])
assert_allclose(dist, p2_expected, rtol=1e-14)
dist = pdist(x, metric='wminkowski', w=[0.5, 1.0, 2.0], p=1)
assert_allclose(dist, p1_expected, rtol=1e-14)
################### pdist: hamming
def test_pdist_hamming_random(self):
"Tests pdist(X, 'hamming') on random data."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-hamming']
Y_test1 = pdist(X, 'hamming')
#print "hamming", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_hamming_random_float32(self):
"Tests pdist(X, 'hamming') on random data."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = pdist(X, 'hamming')
#print "hamming", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_hamming_random_nonC(self):
"Tests pdist(X, 'test_hamming') [the non-C implementation] on random data."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-hamming']
Y_test2 = pdist(X, 'test_hamming')
#print "test-hamming", np.abs(Y_test2 - Y_right).max()
self.assertTrue(within_tol(Y_test2, Y_right, eps))
################### pdist: hamming (double)
def test_pdist_dhamming_random(self):
"Tests pdist(X, 'hamming') on random data."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = pdist(X, 'hamming')
#print "hamming", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_dhamming_random_float32(self):
"Tests pdist(X, 'hamming') on random data. (float32)"
eps = 1e-07
# Get the data: the input matrix and the right output.
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = pdist(X, 'hamming')
#print "hamming", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_dhamming_random_nonC(self):
"Tests pdist(X, 'test_hamming') [the non-C implementation] on random data."
eps = 1e-07
# Get the data: the input matrix and the right output.
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test2 = pdist(X, 'test_hamming')
#print "test-hamming", np.abs(Y_test2 - Y_right).max()
self.assertTrue(within_tol(Y_test2, Y_right, eps))
################### pdist: jaccard
def test_pdist_jaccard_random(self):
"Tests pdist(X, 'jaccard') on random data."
eps = 1e-08
# Get the data: the input matrix and the right output.
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-jaccard']
Y_test1 = pdist(X, 'jaccard')
#print "jaccard", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_jaccard_random_float32(self):
"Tests pdist(X, 'jaccard') on random data. (float32)"
eps = 1e-08
# Get the data: the input matrix and the right output.
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = pdist(X, 'jaccard')
#print "jaccard", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_jaccard_random_nonC(self):
"Tests pdist(X, 'test_jaccard') [the non-C implementation] on random data."
eps = 1e-08
# Get the data: the input matrix and the right output.
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-jaccard']
Y_test2 = pdist(X, 'test_jaccard')
#print "test-jaccard", np.abs(Y_test2 - Y_right).max()
self.assertTrue(within_tol(Y_test2, Y_right, eps))
################### pdist: jaccard (double)
def test_pdist_djaccard_random(self):
"Tests pdist(X, 'jaccard') on random data."
eps = 1e-08
# Get the data: the input matrix and the right output.
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = pdist(X, 'jaccard')
#print "jaccard", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_djaccard_random_float32(self):
"Tests pdist(X, 'jaccard') on random data. (float32)"
eps = 1e-08
# Get the data: the input matrix and the right output.
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = pdist(X, 'jaccard')
#print "jaccard", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_djaccard_random_nonC(self):
"Tests pdist(X, 'test_jaccard') [the non-C implementation] on random data."
eps = 1e-08
# Get the data: the input matrix and the right output.
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test2 = pdist(X, 'test_jaccard')
#print "test-jaccard", np.abs(Y_test2 - Y_right).max()
self.assertTrue(within_tol(Y_test2, Y_right, eps))
################### pdist: chebychev
def test_pdist_chebychev_random(self):
"Tests pdist(X, 'chebychev') on random data."
eps = 1e-08
# Get the data: the input matrix and the right output.
X = eo['pdist-double-inp']
Y_right = eo['pdist-chebychev']
Y_test1 = pdist(X, 'chebychev')
#print "chebychev", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_chebychev_random_float32(self):
"Tests pdist(X, 'chebychev') on random data. (float32)"
eps = 1e-07
# Get the data: the input matrix and the right output.
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-chebychev']
Y_test1 = pdist(X, 'chebychev')
if verbose > 2:
print("chebychev", np.abs(Y_test1 - Y_right).max())
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_chebychev_random_nonC(self):
"Tests pdist(X, 'test_chebychev') [the non-C implementation] on random data."
eps = 1e-08
# Get the data: the input matrix and the right output.
X = eo['pdist-double-inp']
Y_right = eo['pdist-chebychev']
Y_test2 = pdist(X, 'test_chebychev')
#print "test-chebychev", np.abs(Y_test2 - Y_right).max()
self.assertTrue(within_tol(Y_test2, Y_right, eps))
def test_pdist_chebychev_iris(self):
"Tests pdist(X, 'chebychev') on the Iris data set."
eps = 1e-15
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = eo['pdist-chebychev-iris']
Y_test1 = pdist(X, 'chebychev')
#print "chebychev-iris", np.abs(Y_test1 - Y_right).max()
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_chebychev_iris_float32(self):
"Tests pdist(X, 'chebychev') on the Iris data set. (float32)"
eps = 1e-06
# Get the data: the input matrix and the right output.
X = np.float32(eo['iris'])
Y_right = eo['pdist-chebychev-iris']
Y_test1 = pdist(X, 'chebychev')
if verbose > 2:
print("chebychev-iris", np.abs(Y_test1 - Y_right).max())
self.assertTrue(within_tol(Y_test1, Y_right, eps))
def test_pdist_chebychev_iris_nonC(self):
"Tests pdist(X, 'test_chebychev') [the non-C implementation] on the Iris data set."
eps = 1e-15
# Get the data: the input matrix and the right output.
X = eo['iris']
Y_right = eo['pdist-chebychev-iris']
Y_test2 = pdist(X, 'test_chebychev')
#print "test-chebychev-iris", np.abs(Y_test2 - Y_right).max()
self.assertTrue(within_tol(Y_test2, Y_right, eps))
def test_pdist_matching_mtica1(self):
"Tests matching(*,*) with mtica example #1 (nums)."
m = matching(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = matching(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
self.assertTrue(np.abs(m - 0.6) <= 1e-10)
self.assertTrue(np.abs(m2 - 0.6) <= 1e-10)
def test_pdist_matching_mtica2(self):
"Tests matching(*,*) with mtica example #2."
m = matching(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = matching(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
self.assertTrue(np.abs(m - (2.0/3.0)) <= 1e-10)
self.assertTrue(np.abs(m2 - (2.0/3.0)) <= 1e-10)
def test_pdist_matching_match(self):
"Tests pdist(X, 'matching') to see if the two implementations match on random boolean input data."
D = eo['random-bool-data']
B = np.bool_(D)
if verbose > 2:
print(B.shape, B.dtype)
eps = 1e-10
y1 = pdist(B, "matching")
y2 = pdist(B, "test_matching")
y3 = pdist(D, "test_matching")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y1-y3).max())
self.assertTrue(within_tol(y1, y2, eps))
self.assertTrue(within_tol(y2, y3, eps))
def test_pdist_jaccard_mtica1(self):
"Tests jaccard(*,*) with mtica example #1."
m = jaccard(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = jaccard(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
self.assertTrue(np.abs(m - 0.6) <= 1e-10)
self.assertTrue(np.abs(m2 - 0.6) <= 1e-10)
def test_pdist_jaccard_mtica2(self):
"Tests jaccard(*,*) with mtica example #2."
m = jaccard(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = jaccard(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
self.assertTrue(np.abs(m - (2.0/3.0)) <= 1e-10)
self.assertTrue(np.abs(m2 - (2.0/3.0)) <= 1e-10)
def test_pdist_jaccard_match(self):
"Tests pdist(X, 'jaccard') to see if the two implementations match on random double input data."
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "jaccard")
y2 = pdist(D, "test_jaccard")
y3 = pdist(np.bool_(D), "test_jaccard")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
self.assertTrue(within_tol(y1, y2, eps))
self.assertTrue(within_tol(y2, y3, eps))
def test_pdist_yule_mtica1(self):
"Tests yule(*,*) with mtica example #1."
m = yule(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = yule(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
self.assertTrue(np.abs(m - 2.0) <= 1e-10)
self.assertTrue(np.abs(m2 - 2.0) <= 1e-10)
def test_pdist_yule_mtica2(self):
"Tests yule(*,*) with mtica example #2."
m = yule(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = yule(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
self.assertTrue(np.abs(m - 2.0) <= 1e-10)
self.assertTrue(np.abs(m2 - 2.0) <= 1e-10)
def test_pdist_yule_match(self):
"Tests pdist(X, 'yule') to see if the two implementations match on random double input data."
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "yule")
y2 = pdist(D, "test_yule")
y3 = pdist(np.bool_(D), "test_yule")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
self.assertTrue(within_tol(y1, y2, eps))
self.assertTrue(within_tol(y2, y3, eps))
def test_pdist_dice_mtica1(self):
"Tests dice(*,*) with mtica example #1."
m = dice(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = dice(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
self.assertTrue(np.abs(m - (3.0/7.0)) <= 1e-10)
self.assertTrue(np.abs(m2 - (3.0/7.0)) <= 1e-10)
def test_pdist_dice_mtica2(self):
"Tests dice(*,*) with mtica example #2."
m = dice(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = dice(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
self.assertTrue(np.abs(m - 0.5) <= 1e-10)
self.assertTrue(np.abs(m2 - 0.5) <= 1e-10)
def test_pdist_dice_match(self):
"Tests pdist(X, 'dice') to see if the two implementations match on random double input data."
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "dice")
y2 = pdist(D, "test_dice")
y3 = pdist(D, "test_dice")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
self.assertTrue(within_tol(y1, y2, eps))
self.assertTrue(within_tol(y2, y3, eps))
def test_pdist_sokalsneath_mtica1(self):
"Tests sokalsneath(*,*) with mtica example #1."
m = sokalsneath(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = sokalsneath(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
self.assertTrue(np.abs(m - (3.0/4.0)) <= 1e-10)
self.assertTrue(np.abs(m2 - (3.0/4.0)) <= 1e-10)
def test_pdist_sokalsneath_mtica2(self):
"Tests sokalsneath(*,*) with mtica example #2."
m = sokalsneath(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = sokalsneath(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
self.assertTrue(np.abs(m - (4.0/5.0)) <= 1e-10)
self.assertTrue(np.abs(m2 - (4.0/5.0)) <= 1e-10)
def test_pdist_sokalsneath_match(self):
"Tests pdist(X, 'sokalsneath') to see if the two implementations match on random double input data."
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "sokalsneath")
y2 = pdist(D, "test_sokalsneath")
y3 = pdist(np.bool_(D), "test_sokalsneath")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
self.assertTrue(within_tol(y1, y2, eps))
self.assertTrue(within_tol(y2, y3, eps))
def test_pdist_rogerstanimoto_mtica1(self):
"Tests rogerstanimoto(*,*) with mtica example #1."
m = rogerstanimoto(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = rogerstanimoto(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
self.assertTrue(np.abs(m - (3.0/4.0)) <= 1e-10)
self.assertTrue(np.abs(m2 - (3.0/4.0)) <= 1e-10)
def test_pdist_rogerstanimoto_mtica2(self):
"Tests rogerstanimoto(*,*) with mtica example #2."
m = rogerstanimoto(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = rogerstanimoto(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
self.assertTrue(np.abs(m - (4.0/5.0)) <= 1e-10)
self.assertTrue(np.abs(m2 - (4.0/5.0)) <= 1e-10)
def test_pdist_rogerstanimoto_match(self):
"Tests pdist(X, 'rogerstanimoto') to see if the two implementations match on random double input data."
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "rogerstanimoto")
y2 = pdist(D, "test_rogerstanimoto")
y3 = pdist(np.bool_(D), "test_rogerstanimoto")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
self.assertTrue(within_tol(y1, y2, eps))
self.assertTrue(within_tol(y2, y3, eps))
def test_pdist_russellrao_mtica1(self):
"Tests russellrao(*,*) with mtica example #1."
m = russellrao(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = russellrao(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
self.assertTrue(np.abs(m - (3.0/5.0)) <= 1e-10)
self.assertTrue(np.abs(m2 - (3.0/5.0)) <= 1e-10)
def test_pdist_russellrao_mtica2(self):
"Tests russellrao(*,*) with mtica example #2."
m = russellrao(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = russellrao(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
self.assertTrue(np.abs(m - (2.0/3.0)) <= 1e-10)
self.assertTrue(np.abs(m2 - (2.0/3.0)) <= 1e-10)
def test_pdist_russellrao_match(self):
"Tests pdist(X, 'russellrao') to see if the two implementations match on random double input data."
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "russellrao")
y2 = pdist(D, "test_russellrao")
y3 = pdist(np.bool_(D), "test_russellrao")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
self.assertTrue(within_tol(y1, y2, eps))
self.assertTrue(within_tol(y2, y3, eps))
def test_pdist_sokalmichener_match(self):
"Tests pdist(X, 'sokalmichener') to see if the two implementations match on random double input data."
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "sokalmichener")
y2 = pdist(D, "test_sokalmichener")
y3 = pdist(np.bool_(D), "test_sokalmichener")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
self.assertTrue(within_tol(y1, y2, eps))
self.assertTrue(within_tol(y2, y3, eps))
def test_pdist_kulsinski_match(self):
"Tests pdist(X, 'kulsinski') to see if the two implementations match on random double input data."
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "kulsinski")
y2 = pdist(D, "test_kulsinski")
y3 = pdist(np.bool_(D), "test_kulsinski")
if verbose > 2:
print(np.abs(y1-y2).max())
self.assertTrue(within_tol(y1, y2, eps))
def test_pdist_canberra_match(self):
"Tests pdist(X, 'canberra') to see if the two implementations match on the Iris data set."
D = eo['iris']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "canberra")
y2 = pdist(D, "test_canberra")
if verbose > 2:
print(np.abs(y1-y2).max())
self.assertTrue(within_tol(y1, y2, eps))
def test_pdist_canberra_ticket_711(self):
"Tests pdist(X, 'canberra') to see if Canberra gives the right result as reported in Scipy bug report 711."
eps = 1e-8
pdist_y = pdist(([3.3], [3.4]), "canberra")
right_y = 0.01492537
if verbose > 2:
print(np.abs(pdist_y-right_y).max())
self.assertTrue(within_tol(pdist_y, right_y, eps))
def within_tol(a, b, tol):
    """Return True when every element of `a` is within `tol` of the
    corresponding element of `b` (strict max-norm comparison)."""
    largest_gap = np.max(np.absolute(a - b))
    return largest_gap < tol
class TestSomeDistanceFunctions(TestCase):
def setUp(self):
# 1D arrays
x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 1.0, 5.0])
# 3x1 arrays
x31 = x[:,np.newaxis]
y31 = y[:,np.newaxis]
# 1x3 arrays
x13 = x31.T
y13 = y31.T
self.cases = [(x,y), (x31, y31), (x13, y13)]
def test_minkowski(self):
for x, y in self.cases:
dist1 = minkowski(x, y, p=1)
assert_almost_equal(dist1, 3.0)
dist1p5 = minkowski(x, y, p=1.5)
assert_almost_equal(dist1p5, (1.0+2.0**1.5)**(2./3))
dist2 = minkowski(x, y, p=2)
assert_almost_equal(dist2, np.sqrt(5))
def test_wminkowski(self):
w = np.array([1.0, 2.0, 0.5])
for x, y in self.cases:
dist1 = wminkowski(x, y, p=1, w=w)
assert_almost_equal(dist1, 3.0)
dist1p5 = wminkowski(x, y, p=1.5, w=w)
assert_almost_equal(dist1p5, (2.0**1.5+1.0)**(2./3))
dist2 = wminkowski(x, y, p=2, w=w)
assert_almost_equal(dist2, np.sqrt(5))
def test_euclidean(self):
for x, y in self.cases:
dist = euclidean(x, y)
assert_almost_equal(dist, np.sqrt(5))
def test_sqeuclidean(self):
for x, y in self.cases:
dist = sqeuclidean(x, y)
assert_almost_equal(dist, 5.0)
def test_cosine(self):
for x, y in self.cases:
dist = cosine(x, y)
assert_almost_equal(dist, 1.0 - 18.0/(np.sqrt(14)*np.sqrt(27)))
def test_correlation(self):
xm = np.array([-1.0, 0, 1.0])
ym = np.array([-4.0/3, -4.0/3, 5.0-7.0/3])
for x, y in self.cases:
dist = correlation(x, y)
assert_almost_equal(dist, 1.0 - np.dot(xm, ym)/(norm(xm)*norm(ym)))
def test_mahalanobis(self):
x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 1.0, 5.0])
vi = np.array([[2.0, 1.0, 0.0],[1.0, 2.0, 1.0], [0.0, 1.0, 2.0]])
for x, y in self.cases:
dist = mahalanobis(x, y, vi)
assert_almost_equal(dist, np.sqrt(6.0))
class TestSquareForm(TestCase):
    """Tests for squareform, the condensed <-> redundant matrix converter.

    squareform maps a condensed distance vector of length n*(n-1)/2 to an
    n-by-n symmetric matrix with zero diagonal, and back.
    """

    def test_squareform_empty_matrix(self):
        "Tests squareform on an empty matrix."
        A = np.zeros((0,0))
        rA = squareform(np.array(A, dtype='double'))
        self.assertTrue(rA.shape == (0,))

    def test_squareform_empty_vector(self):
        "Tests squareform on an empty vector."
        # An empty condensed vector corresponds to a single observation.
        v = np.zeros((0,))
        rv = squareform(np.array(v, dtype='double'))
        self.assertTrue(rv.shape == (1,1))
        self.assertTrue(rv[0, 0] == 0)

    def test_squareform_1by1_matrix(self):
        "Tests squareform on a 1x1 matrix."
        A = np.zeros((1,1))
        rA = squareform(np.array(A, dtype='double'))
        self.assertTrue(rA.shape == (0,))

    def test_squareform_one_vector(self):
        "Tests squareform on a 1-D array, length=1."
        v = np.ones((1,)) * 8.3
        rv = squareform(np.array(v, dtype='double'))
        self.assertTrue(rv.shape == (2,2))
        # The single condensed entry lands in both off-diagonal cells.
        self.assertTrue(rv[0,1] == 8.3)
        self.assertTrue(rv[1,0] == 8.3)

    def test_squareform_one_binary_vector(self):
        """Tests squareform on a 1x1 binary matrix; conversion to double was
        causing problems (see pull request 73)."""
        v = np.ones((1,), dtype=np.bool)
        rv = squareform(v)
        self.assertTrue(rv.shape == (2,2))
        self.assertTrue(rv[0,1])

    def test_squareform_2by2_matrix(self):
        "Tests squareform on a 2x2 matrix."
        A = np.zeros((2,2))
        A[0,1] = 0.8
        A[1,0] = 0.8
        rA = squareform(np.array(A, dtype='double'))
        self.assertTrue(rA.shape == (1,))
        self.assertTrue(rA[0] == 0.8)

    def test_squareform_multi_matrix(self):
        "Tests squareform on a square matrices of multiple sizes."
        # NOTE(review): nose-style generator test; plain unittest runners
        # will not execute the yielded checks.
        for n in xrange(2, 5):
            yield self.check_squareform_multi_matrix(n)

    def check_squareform_multi_matrix(self, n):
        # Round-trip check: pdist -> squareform -> squareform recovers the
        # condensed form entry-for-entry, with a zero diagonal.
        X = np.random.rand(n, 4)
        Y = pdist(X)
        self.assertTrue(len(Y.shape) == 1)
        A = squareform(Y)
        Yr = squareform(A)
        s = A.shape
        k = 0
        if verbose >= 3:
            print(A.shape, Y.shape, Yr.shape)
        self.assertTrue(len(s) == 2)
        self.assertTrue(len(Yr.shape) == 1)
        self.assertTrue(s[0] == s[1])
        for i in xrange(0, s[0]):
            for j in xrange(i+1, s[1]):
                if i != j:
                    #print i, j, k, A[i, j], Y[k]
                    self.assertTrue(A[i, j] == Y[k])
                    k += 1
                else:
                    self.assertTrue(A[i, j] == 0)
class TestNumObsY(TestCase):
def test_num_obs_y_multi_matrix(self):
"Tests num_obs_y with observation matrices of multiple sizes."
for n in xrange(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
#print A.shape, Y.shape, Yr.shape
self.assertTrue(num_obs_y(Y) == n)
def test_num_obs_y_1(self):
"Tests num_obs_y(y) on a condensed distance matrix over 1 observations. Expecting exception."
self.assertRaises(ValueError, self.check_y, 1)
def test_num_obs_y_2(self):
"Tests num_obs_y(y) on a condensed distance matrix over 2 observations."
self.assertTrue(self.check_y(2))
def test_num_obs_y_3(self):
"Tests num_obs_y(y) on a condensed distance matrix over 3 observations."
self.assertTrue(self.check_y(3))
def test_num_obs_y_4(self):
"Tests num_obs_y(y) on a condensed distance matrix over 4 observations."
self.assertTrue(self.check_y(4))
def test_num_obs_y_5_10(self):
"Tests num_obs_y(y) on a condensed distance matrix between 5 and 15 observations."
for i in xrange(5, 16):
self.minit(i)
def test_num_obs_y_2_100(self):
"Tests num_obs_y(y) on 100 improper condensed distance matrices. Expecting exception."
a = set([])
for n in xrange(2, 16):
a.add(n*(n-1)/2)
for i in xrange(5, 105):
if i not in a:
self.assertRaises(ValueError, self.bad_y, i)
def minit(self, n):
self.assertTrue(self.check_y(n))
def bad_y(self, n):
y = np.random.rand(n)
return num_obs_y(y)
def check_y(self, n):
return num_obs_y(self.make_y(n)) == n
def make_y(self, n):
return np.random.rand((n * (n - 1)) // 2)
class TestNumObsDM(TestCase):
############## num_obs_dm
def test_num_obs_dm_multi_matrix(self):
"Tests num_obs_dm with observation matrices of multiple sizes."
for n in xrange(1, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
A = squareform(Y)
if verbose >= 3:
print(A.shape, Y.shape)
self.assertTrue(num_obs_dm(A) == n)
def test_num_obs_dm_0(self):
"Tests num_obs_dm(D) on a 0x0 distance matrix. Expecting exception."
self.assertTrue(self.check_D(0))
def test_num_obs_dm_1(self):
"Tests num_obs_dm(D) on a 1x1 distance matrix."
self.assertTrue(self.check_D(1))
def test_num_obs_dm_2(self):
"Tests num_obs_dm(D) on a 2x2 distance matrix."
self.assertTrue(self.check_D(2))
def test_num_obs_dm_3(self):
"Tests num_obs_dm(D) on a 3x3 distance matrix."
self.assertTrue(self.check_D(2))
def test_num_obs_dm_4(self):
"Tests num_obs_dm(D) on a 4x4 distance matrix."
self.assertTrue(self.check_D(4))
def check_D(self, n):
return num_obs_dm(self.make_D(n)) == n
def make_D(self, n):
return np.random.rand(n, n)
def is_valid_dm_throw(D):
    """Validate square distance matrix *D*, raising instead of returning False.

    Thin wrapper so assertRaises-style tests can exercise ``throw=True``.
    """
    return is_valid_dm(D, throw=True)
class TestIsValidDM(TestCase):
    """Tests for is_valid_dm (square distance-matrix validation).

    Naming convention: *_E variants expect an exception when throw=True
    (via the module-level is_valid_dm_throw helper); *_F variants expect a
    plain False return from is_valid_dm without throw.

    NOTE(review): several docstrings say "int16" but dtype 'i' is the
    platform C int (typically int32); the dtype-rejection path is what is
    exercised either way.
    """

    def test_is_valid_dm_int16_array_E(self):
        "Tests is_valid_dm(*) on an int16 array. Exception expected."
        D = np.zeros((5, 5), dtype='i')
        self.assertRaises(TypeError, is_valid_dm_throw, (D))

    def test_is_valid_dm_int16_array_F(self):
        "Tests is_valid_dm(*) on an int16 array. False expected."
        D = np.zeros((5, 5), dtype='i')
        self.assertTrue(is_valid_dm(D) == False)

    def test_is_valid_dm_improper_shape_1D_E(self):
        "Tests is_valid_dm(*) on a 1D array. Exception expected."
        D = np.zeros((5,), dtype=np.double)
        self.assertRaises(ValueError, is_valid_dm_throw, (D))

    def test_is_valid_dm_improper_shape_1D_F(self):
        "Tests is_valid_dm(*) on a 1D array. False expected."
        D = np.zeros((5,), dtype=np.double)
        self.assertTrue(is_valid_dm(D) == False)

    def test_is_valid_dm_improper_shape_3D_E(self):
        "Tests is_valid_dm(*) on a 3D array. Exception expected."
        D = np.zeros((3,3,3), dtype=np.double)
        self.assertRaises(ValueError, is_valid_dm_throw, (D))

    def test_is_valid_dm_improper_shape_3D_F(self):
        "Tests is_valid_dm(*) on a 3D array. False expected."
        D = np.zeros((3,3,3), dtype=np.double)
        self.assertTrue(is_valid_dm(D) == False)

    def test_is_valid_dm_nonzero_diagonal_E(self):
        "Tests is_valid_dm(*) on a distance matrix with a nonzero diagonal. Exception expected."
        y = np.random.rand(10)
        D = squareform(y)
        # Corrupt the (zero) diagonal of an otherwise valid 5x5 matrix.
        for i in xrange(0, 5):
            D[i, i] = 2.0
        self.assertRaises(ValueError, is_valid_dm_throw, (D))

    def test_is_valid_dm_nonzero_diagonal_F(self):
        "Tests is_valid_dm(*) on a distance matrix with a nonzero diagonal. False expected."
        y = np.random.rand(10)
        D = squareform(y)
        for i in xrange(0, 5):
            D[i, i] = 2.0
        self.assertTrue(is_valid_dm(D) == False)

    def test_is_valid_dm_assymetric_E(self):
        "Tests is_valid_dm(*) on an assymetric distance matrix. Exception expected."
        y = np.random.rand(10)
        D = squareform(y)
        # Break symmetry in a single off-diagonal pair.
        D[1,3] = D[3,1] + 1
        self.assertRaises(ValueError, is_valid_dm_throw, (D))

    def test_is_valid_dm_assymetric_F(self):
        "Tests is_valid_dm(*) on an assymetric distance matrix. False expected."
        y = np.random.rand(10)
        D = squareform(y)
        D[1,3] = D[3,1] + 1
        self.assertTrue(is_valid_dm(D) == False)

    def test_is_valid_dm_correct_1_by_1(self):
        "Tests is_valid_dm(*) on a correct 1x1. True expected."
        D = np.zeros((1,1), dtype=np.double)
        self.assertTrue(is_valid_dm(D) == True)

    def test_is_valid_dm_correct_2_by_2(self):
        "Tests is_valid_dm(*) on a correct 2x2. True expected."
        y = np.random.rand(1)
        D = squareform(y)
        self.assertTrue(is_valid_dm(D) == True)

    def test_is_valid_dm_correct_3_by_3(self):
        "Tests is_valid_dm(*) on a correct 3x3. True expected."
        y = np.random.rand(3)
        D = squareform(y)
        self.assertTrue(is_valid_dm(D) == True)

    def test_is_valid_dm_correct_4_by_4(self):
        "Tests is_valid_dm(*) on a correct 4x4. True expected."
        y = np.random.rand(6)
        D = squareform(y)
        self.assertTrue(is_valid_dm(D) == True)

    def test_is_valid_dm_correct_5_by_5(self):
        "Tests is_valid_dm(*) on a correct 5x5. True expected."
        y = np.random.rand(10)
        D = squareform(y)
        self.assertTrue(is_valid_dm(D) == True)
def is_valid_y_throw(y):
    """Validate condensed distance matrix *y*, raising instead of returning False.

    Thin wrapper so assertRaises-style tests can exercise ``throw=True``.
    """
    return is_valid_y(y, throw=True)
class TestIsValidY(TestCase):
def test_is_valid_y_int16_array_E(self):
"Tests is_valid_y(*) on an int16 array. Exception expected."
y = np.zeros((10,), dtype='i')
self.assertRaises(TypeError, is_valid_y_throw, (y))
def test_is_valid_y_int16_array_F(self):
"Tests is_valid_y(*) on an int16 array. False expected."
y = np.zeros((10,), dtype='i')
self.assertTrue(is_valid_y(y) == False)
def test_is_valid_y_improper_shape_2D_E(self):
"Tests is_valid_y(*) on a 2D array. Exception expected."
y = np.zeros((3,3,), dtype=np.double)
self.assertRaises(ValueError, is_valid_y_throw, (y))
def test_is_valid_y_improper_shape_2D_F(self):
"Tests is_valid_y(*) on a 2D array. False expected."
y = np.zeros((3,3,), dtype=np.double)
self.assertTrue(is_valid_y(y) == False)
def test_is_valid_y_improper_shape_3D_E(self):
"Tests is_valid_y(*) on a 3D array. Exception expected."
y = np.zeros((3,3,3), dtype=np.double)
self.assertRaises(ValueError, is_valid_y_throw, (y))
def test_is_valid_y_improper_shape_3D_F(self):
"Tests is_valid_y(*) on a 3D array. False expected."
y = np.zeros((3,3,3), dtype=np.double)
self.assertTrue(is_valid_y(y) == False)
def test_is_valid_y_correct_2_by_2(self):
"Tests is_valid_y(*) on a correct 2x2 condensed. True expected."
y = self.correct_n_by_n(2)
self.assertTrue(is_valid_y(y) == True)
def test_is_valid_y_correct_3_by_3(self):
"Tests is_valid_y(*) on a correct 3x3 condensed. True expected."
y = self.correct_n_by_n(3)
self.assertTrue(is_valid_y(y) == True)
def test_is_valid_y_correct_4_by_4(self):
"Tests is_valid_y(*) on a correct 4x4 condensed. True expected."
y = self.correct_n_by_n(4)
self.assertTrue(is_valid_y(y) == True)
def test_is_valid_y_correct_5_by_5(self):
"Tests is_valid_y(*) on a correct 5x5 condensed. True expected."
y = self.correct_n_by_n(5)
self.assertTrue(is_valid_y(y) == True)
def test_is_valid_y_2_100(self):
"Tests is_valid_y(*) on 100 improper condensed distance matrices. Expecting exception."
a = set([])
for n in xrange(2, 16):
a.add(n*(n-1)/2)
for i in xrange(5, 105):
if i not in a:
self.assertRaises(ValueError, self.bad_y, i)
def bad_y(self, n):
y = np.random.rand(n)
return is_valid_y(y, throw=True)
def correct_n_by_n(self, n):
y = np.random.rand((n * (n - 1)) // 2)
return y
def test_bad_p():
    """Raise ValueError if p < 1."""
    bad_p = 0.5
    u, v = [1, 2], [3, 4]
    assert_raises(ValueError, minkowski, u, v, bad_p)
    assert_raises(ValueError, wminkowski, u, v, bad_p, [1, 1])
def test_sokalsneath_all_false():
    """Regression test for ticket #876: all-False vectors must raise."""
    all_false = [False, False, False]
    assert_raises(ValueError, sokalsneath, all_false, list(all_false))
def test_canberra():
    """Regression test for ticket #1430."""
    cases = [([1, 2, 3], [2, 4, 6], 1),
             ([1, 1, 0, 0], [1, 0, 1, 0], 2)]
    for u, v, expected in cases:
        assert_equal(canberra(u, v), expected)
def test_braycurtis():
    """Regression test for ticket #1430."""
    cases = [([1, 2, 3], [2, 4, 6], 1. / 3),
             ([1, 1, 0, 0], [1, 0, 1, 0], 0.5)]
    for u, v, expected in cases:
        assert_almost_equal(braycurtis(u, v), expected, decimal=15)
def test_euclideans():
    """Regression test for ticket #1328."""
    x1 = np.array([1, 1, 1])
    x2 = np.array([0, 0, 0])

    # Basic test of the calculation.
    assert_almost_equal(sqeuclidean(x1, x2), 3.0, decimal=14)
    assert_almost_equal(euclidean(x1, x2), np.sqrt(3), decimal=14)

    # Check flattening for (1, N) or (N, 1) inputs
    assert_almost_equal(euclidean(x1[np.newaxis, :], x2[np.newaxis, :]),
                        np.sqrt(3), decimal=14)
    assert_almost_equal(sqeuclidean(x1[np.newaxis, :], x2[np.newaxis, :]),
                        3.0, decimal=14)
    assert_almost_equal(sqeuclidean(x1[:, np.newaxis], x2[:, np.newaxis]),
                        3.0, decimal=14)

    # Distance metrics only defined for vectors (= 1-D)
    x = np.arange(4).reshape(2, 2)
    assert_raises(ValueError, euclidean, x, x)
    assert_raises(ValueError, sqeuclidean, x, x)

    # Another check, with random data.
    rs = np.random.RandomState(1234567890)
    x = rs.rand(10)
    y = rs.rand(10)
    d1 = euclidean(x, y)
    d2 = sqeuclidean(x, y)
    # euclidean is the square root of sqeuclidean, so d1**2 must equal d2.
    assert_almost_equal(d1**2, d2, decimal=14)
def test_sokalmichener():
    """Test that sokalmichener has the same result for bool and int inputs."""
    bools_p = [True, True, False]
    bools_q = [True, False, True]
    ints_p = [int(flag) for flag in bools_p]
    ints_q = [int(flag) for flag in bools_q]
    # These should be exactly the same.
    assert_equal(sokalmichener(bools_p, bools_q),
                 sokalmichener(ints_p, ints_q))
def test__validate_vector():
    """Assorted tests for _validate_vector."""
    # A 1-D list round-trips unchanged (as an ndarray).
    x = [1, 2, 3]
    y = _validate_vector(x)
    assert_array_equal(y, x)

    # An explicit dtype request is honoured.
    y = _validate_vector(x, dtype=np.float64)
    assert_array_equal(y, x)
    assert_equal(y.dtype, np.float64)

    x = [1]
    y = _validate_vector(x)
    assert_equal(y.ndim, 1)
    assert_equal(y, x)

    # A scalar is promoted to a length-1 vector.
    x = 1
    y = _validate_vector(x)
    assert_equal(y.ndim, 1)
    assert_equal(y, [x])

    # NOTE(review): this relies on _validate_vector squeezing singleton
    # dimensions of higher-rank input; newer SciPy versions raise for
    # ndim != 1 instead -- confirm against the version under test.
    x = np.arange(5).reshape(1, -1, 1)
    y = _validate_vector(x)
    assert_equal(y.ndim, 1)
    assert_array_equal(y, x[0, :, 0])

    # A genuinely 2-D input is rejected.
    x = [[1, 2], [3, 4]]
    assert_raises(ValueError, _validate_vector, x)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
elkingtonmcb/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 259 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
# Seed shared by the dataset generator and every classifier so the whole
# run (data and tree construction) is reproducible.
RANDOM_STATE = 123

# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
                           n_clusters_per_class=1, n_informative=15,
                           random_state=RANDOM_STATE)

# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for paralellised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
    ("RandomForestClassifier, max_features='sqrt'",
        RandomForestClassifier(warm_start=True, oob_score=True,
                               max_features="sqrt",
                               random_state=RANDOM_STATE)),
    ("RandomForestClassifier, max_features='log2'",
        RandomForestClassifier(warm_start=True, max_features='log2',
                               oob_score=True,
                               random_state=RANDOM_STATE)),
    ("RandomForestClassifier, max_features=None",
        RandomForestClassifier(warm_start=True, max_features=None,
                               oob_score=True,
                               random_state=RANDOM_STATE))
]

# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)

# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175

for label, clf in ensemble_clfs:
    for i in range(min_estimators, max_estimators + 1):
        # With warm_start=True, refitting only grows the ensemble by the
        # newly requested trees instead of training from scratch.
        clf.set_params(n_estimators=i)
        clf.fit(X, y)

        # Record the OOB error for each `n_estimators=i` setting.
        oob_error = 1 - clf.oob_score_
        error_rate[label].append((i, oob_error))

# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
    xs, ys = zip(*clf_err)
    plt.plot(xs, ys, label=label)

plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
Lemma1/MAC-POSTS | doc_builder/sphinx-contrib/feed/sphinxcontrib/feed/fsdict.py | 5 | 3256 | # −*− coding: UTF−8 −*−
from path import path
import os
import pickle
"""
A class providing dictionary access to a folder.
cribbed from http://bitbucket.org/howthebodyworks/fsdict
"""
def get_tmp_dir():
    """Create a fresh temporary directory and return its path as a string."""
    import tempfile
    tmp_path = tempfile.mkdtemp()
    return tmp_path
class FSDict(dict):
    """
    Provide dictionary access to a directory on disk: each key is a file
    name inside ``work_dir`` and each value is pickled into that file.

    N.B. the keys ordering here is FS-dependent and thus unlikely to be the
    same as with a real dict. Beware.

    NOTE(review): this module is Python 2 code (``except Exception, e``)
    and relies on the third-party ``path`` package for the ``/`` operator.
    """

    # Class-level registry of every work_dir ever created, so that
    # cleanup_all() can remove directories left behind by instances.
    unclean_dirs = []

    def __init__(self, initval=[], work_dir=None, *args, **kwargs):
        # Seed the mapping from `initval` (a dict or an iterable of
        # (key, value) pairs) and pickle the values under `work_dir`.
        # The mutable default `initval=[]` is harmless: it is only read.
        if work_dir is None:
            work_dir = get_tmp_dir()
        self.work_dir = path(work_dir)
        if not self.work_dir.exists():
            self.work_dir.mkdir()
        # Dispatch on the presence of `iteritems`: mappings yield pairs via
        # iteritems(), plain iterables are assumed to yield pairs directly.
        for key, val in getattr(initval, 'iteritems', initval.__iter__)():
            self[key] = val
        self.unclean_dirs.append(self.work_dir)
        super(FSDict, self).__init__(*args, **kwargs)

    def __setitem__(self, key, val, *args, **kwargs):
        # Persist the value by pickling it into the file named `key`.
        pickle.dump(val, open(self.work_dir/key, 'w'))

    def __getitem__(self, key, *args, **kwargs):
        # Load the pickled value back from the file named `key`.
        return pickle.load(open(self.work_dir/key, 'r'))

    def __repr__(self):
        """
        a hardline list of everything in the dict. may be long.
        """
        return repr(dict([(k, v) for k, v in self.iteritems()]))

    def __str__(self):
        """
        str is truncated somewhat: show at most one key/value pair.
        """
        if len(self.keys()):
            return '{' + repr(self.keys()[0]) + ':' + repr(self[self.keys()[0]]) + ', ...'
        else:
            return super(FSDict, self).__str__()

    def keys(self, *args, **kwargs):
        # Materialize iterkeys(); ordering is filesystem-dependent.
        return [key for key in self.iterkeys()]

    def iterkeys(self, *args, **kwargs):
        # Each regular file in work_dir is a key (relative file name).
        for f in self.work_dir.files():
            yield str(self.work_dir.relpathto(f))

    def iteritems(self):
        for key in self.iterkeys():
            yield key, self[key]

    def itervalues(self):
        for key in self.iterkeys():
            yield self[key]

    def __delitem__(self, key, *args, **kwargs):
        # Deleting a key removes its backing file.
        (self.work_dir/key).unlink()

    def values(self, *args, **kwargs):
        return [self[key] for key in self.keys()]

    def cleanup(self):
        # Remove this instance's entire backing directory tree.
        self.work_dir.rmtree()

    @classmethod
    def cleanup_all(cls):
        # Best-effort removal of every directory ever registered; missing
        # directories (already cleaned) are silently skipped.
        for fsd in cls.unclean_dirs:
            try:
                fsd.rmtree()
            except OSError:
                pass

    def move(self, new_dir):
        # Relocate the backing directory; only update work_dir on success.
        try:
            self.work_dir.move(new_dir)
        except Exception, e:
            raise
        else:
            self.work_dir = new_dir

    def __eq__(self, other):
        """
        when compared to a dict, equate equal if all keys and vals are equal

        note, this is potentially expensive (every value is unpickled).
        """
        #duck type our way to sanity:
        if not hasattr(other, 'keys'): return False
        #OK, it's a dict-ish thing
        try:
            return all([self[key]==other[key] for key in other]) and \
              len(self.keys())==len(other.keys())
        except KeyError:
            return False
Alwnikrotikz/secure-gappproxy | localproxy/cxsetup.py | 2 | 1671 | # A simple setup script to create an executable running wxPython. This also
# demonstrates the method for creating a Windows executable that does not have
# an associated console.
#
# wxapp.py is a very simple "Hello, world" type wxPython application
#
# Run the build process by running the command 'python setup.py build'
#
# If everything works well you should find a subdirectory in the build
# subdirectory that contains the files needed to run the application
import sys
import common
from cx_Freeze import setup, Executable
# Executable "base" and file extension are platform dependent: on Windows
# the Win32GUI base suppresses the console window for GUI programs and
# binaries need an ".exe" suffix; elsewhere the defaults apply.
base = None
extension = ''
if sys.platform == "win32":
    base = "Win32GUI"
    extension = '.exe'

# cx_Freeze build options: compress the library zip, optimize bytecode, and
# bundle images, default certificates and the proxy configuration next to
# the frozen executables.
buildOptions = dict(
    compressed = True,
    optimize = 2,
    create_shared_zip = True,
    include_files =[('image/', 'image/'),
                    ('cert_default/', 'cert_default/'),
                    ('proxy.conf', 'proxy.conf'),
                   ], )

setup(
    name = "Secure GAppProxy",
    version = common.VERSION,
    description = "A Branch of GAppProxy For Security Paranoia",
    options = dict(build_exe = buildOptions),
    # Two frozen entry points: a windowed GUI build and a console build.
    executables = [Executable("gui.py",
                              icon='image/logo.ico',
                              base = base,
                              targetName='proxy_gui%s'%extension,
                              ),
                   Executable("console.py",
                              base = 'Console',
                              targetName='proxy_console%s'%extension,
                              ),
                   ])
| gpl-3.0 |
GdZ/scriptfile | software/googleAppEngine/lib/django_1_2/tests/regressiontests/views/views.py | 38 | 2055 | import sys
from django import forms
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import get_resolver
from django.shortcuts import render_to_response
from django.template import TemplateDoesNotExist
from django.views.debug import technical_500_response
from django.views.generic.create_update import create_object
from regressiontests.views import BrokenException, except_args
from models import Article
def index_page(request):
    """Render a trivial static HTML page (dummy index)."""
    body = '<html><body>Dummy page</body></html>'
    return HttpResponse(body)
def custom_create(request):
    """
    Calls create_object generic view with a custom form class.
    """
    class SlugChangingArticleForm(forms.ModelForm):
        """Custom form class to overwrite the slug."""
        class Meta:
            model = Article

        def save(self, *args, **kwargs):
            # Force a fixed slug regardless of the submitted data before
            # delegating to ModelForm.save().
            self.instance.slug = 'some-other-slug'
            return super(SlugChangingArticleForm, self).save(*args, **kwargs)

    # The redirect URL is interpolated with the saved object's attributes,
    # so %(slug)s resolves to the forced slug above.
    return create_object(request,
        post_save_redirect='/views/create_update/view/article/%(slug)s/',
        form_class=SlugChangingArticleForm)
def raises(request):
    """Deliberately raise, then render the technical 500 debug response."""
    try:
        raise Exception
    except Exception:
        exc_info = sys.exc_info()
        return technical_500_response(request, *exc_info)
def raises404(request):
    """Trigger a resolver error by resolving an unmatchable (empty) path."""
    get_resolver(None).resolve('')
def redirect(request):
    """Force an HTTP redirect to the relative URL ``target/``."""
    target = "target/"
    return HttpResponseRedirect(target)
def view_exception(request, n):
    """Raise BrokenException with the message selected by index *n*."""
    message = except_args[int(n)]
    raise BrokenException(message)
def template_exception(request, n):
    """Render a template that raises; *n* indexes except_args for the message."""
    return render_to_response('debug/template_exception.html',
        {'arg': except_args[int(n)]})
def raises_template_does_not_exist(request):
    """Render a missing template so the debug 500 page can be inspected."""
    # We need to inspect the HTML generated by the fancy 500 debug view but
    # the test client ignores it, so we send it explicitly.
    try:
        return render_to_response('i_dont_exist.html')
    except TemplateDoesNotExist:
        return technical_500_response(request, *sys.exc_info())
| mit |
abaditsegay/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/email/base64mime.py | 54 | 5792 | # Copyright (C) 2002-2006 Python Software Foundation
# Author: Ben Gertzfield
# Contact: email-sig@python.org
"""Base64 content transfer encoding per RFCs 2045-2047.
This module handles the content transfer encoding method defined in RFC 2045
to encode arbitrary 8-bit data using the three 8-bit bytes in four 7-bit
characters encoding known as Base64.
It is used in the MIME standards for email to attach images, audio, and text
using some 8-bit character sets to messages.
This module provides an interface to encode and decode both headers and bodies
with Base64 encoding.
RFC 2045 defines a method for including character set information in an
`encoded-word' in a header. This method is commonly used for 8-bit real names
in To:, From:, Cc:, etc. fields, as well as Subject: lines.
This module does not do the line wrapping or end-of-line character conversion
necessary for proper internationalized headers; it only does dumb encoding and
decoding. To deal with the various line wrapping issues, use the email.Header
module.
"""
__all__ = [
'base64_len',
'body_decode',
'body_encode',
'decode',
'decodestring',
'encode',
'encodestring',
'header_encode',
]
from binascii import b2a_base64, a2b_base64
from email.utils import fix_eols
CRLF = '\r\n'
NL = '\n'
EMPTYSTRING = ''
# See also Charset.py
MISC_LEN = 7
# Helpers
def base64_len(s):
    """Return the length of s when it is encoded with base64.

    Base64 emits 4 output characters for every 3 input bytes; a trailing
    partial group is padded out to a full 4 characters.
    """
    full_groups, remainder = divmod(len(s), 3)
    encoded = full_groups * 4
    if remainder:
        encoded += 4
    return encoded
def header_encode(header, charset='iso-8859-1', keep_eols=False,
                  maxlinelen=76, eol=NL):
    """Encode a single header line with Base64 encoding in a given charset.

    Defined in RFC 2045, this Base64 encoding is identical to normal Base64
    encoding, except that each line must be intelligently wrapped (respecting
    the Base64 encoding), and subsequent lines must start with a space.

    charset names the character set to use to encode the header.  It defaults
    to iso-8859-1.

    End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
    to the canonical email line separator \\r\\n unless the keep_eols
    parameter is True (the default is False).

    Each line of the header will be terminated in the value of eol, which
    defaults to "\\n".  Set this to "\\r\\n" if you are using the result of
    this function directly in email.

    The resulting string will be in the form:

    "=?charset?b?WW/5ciBtYXp66XLrIHf8eiBhIGhhbXBzdGHuciBBIFlv+XIgbWF6euly?=\\n
      =?charset?b?6yB3/HogYSBoYW1wc3Rh7nIgQkMgWW/5ciBtYXp66XLrIHf8eiBhIGhh?="

    with each line wrapped at, at most, maxlinelen characters (defaults to 76
    characters).
    """
    # Return empty headers unchanged
    if not header:
        return header

    if not keep_eols:
        header = fix_eols(header)

    # Base64 encode each line, in encoded chunks no greater than maxlinelen in
    # length, after the RFC chrome is added in.
    base64ed = []
    # MISC_LEN budgets for the "=?...?b?...?=" chrome wrapped around each
    # chunk; base64 expands 3 input bytes to 4 output chars, hence 3/4.
    max_encoded = maxlinelen - len(charset) - MISC_LEN
    max_unencoded = max_encoded * 3 // 4

    for i in range(0, len(header), max_unencoded):
        base64ed.append(b2a_base64(header[i:i+max_unencoded]))

    # Now add the RFC chrome to each encoded chunk
    lines = []
    for line in base64ed:
        # Ignore the last character of each line if it is a newline
        # (b2a_base64 always appends one).
        if line.endswith(NL):
            line = line[:-1]
        # Add the chrome
        lines.append('=?%s?b?%s?=' % (charset, line))
    # Glue the lines together and return it.  BAW: should we be able to
    # specify the leading whitespace in the joiner?
    joiner = eol + ' '
    return joiner.join(lines)
def encode(s, binary=True, maxlinelen=76, eol=NL):
    """Encode a string with base64.

    Each line will be wrapped at, at most, maxlinelen characters (defaults to
    76 characters).

    If binary is False, end-of-line characters will be converted to the
    canonical email end-of-line sequence \\r\\n.  Otherwise they will be left
    verbatim (this is the default).

    Each line of encoded text will end with eol, which defaults to "\\n".  Set
    this to "\r\n" if you will be using the result of this function directly
    in an email.
    """
    if not s:
        return s

    if not binary:
        s = fix_eols(s)

    encvec = []
    # Each output line holds maxlinelen encoded chars, i.e. 3/4 as many
    # unencoded input bytes.
    max_unencoded = maxlinelen * 3 // 4
    for i in range(0, len(s), max_unencoded):
        # BAW: should encode() inherit b2a_base64()'s dubious behavior in
        # adding a newline to the encoded string?
        enc = b2a_base64(s[i:i + max_unencoded])
        # Swap b2a_base64's trailing "\n" for the requested eol sequence.
        if enc.endswith(NL) and eol != NL:
            enc = enc[:-1] + eol
        encvec.append(enc)
    return EMPTYSTRING.join(encvec)
# For convenience and backwards compatibility w/ standard base64 module
body_encode = encode
encodestring = encode
def decode(s, convert_eols=None):
    """Decode a raw base64 string.

    If convert_eols is set to a string value, all canonical email linefeeds,
    e.g. "\\r\\n", in the decoded text will be converted to the value of
    convert_eols.  os.linesep is a good choice for convert_eols if you are
    decoding a text attachment.

    This function does not parse a full MIME header value encoded with
    base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high
    level email.Header class for that functionality.
    """
    if not s:
        return s

    decoded = a2b_base64(s)
    if not convert_eols:
        return decoded
    return decoded.replace(CRLF, convert_eols)
# For convenience and backwards compatibility w/ standard base64 module
body_decode = decode
decodestring = decode
| apache-2.0 |
ulif/ulif.openoffice | tests/conftest.py | 1 | 5507 | import logging
import os
import py.path
import pytest
import subprocess
import sys
import tempfile
import time
from py.io import TextIO
from ulif.openoffice import oooctl
from ulif.openoffice.oooctl import check_port
from ulif.openoffice.testing import envpath_wo_virtualenvs
@pytest.fixture(scope='session')
def tmpdir_sess(request):
    """return a temporary py.path.local object which is unique
    for each test run (scope: session).

    Different to `tmpdir`, the path is removed immediately after test
    session.
    """
    sess_dir = py.path.local(tempfile.mkdtemp())
    # Remove the whole directory tree (rec=1) when the session ends.
    request.addfinalizer(lambda: sess_dir.remove(rec=1))
    return sess_dir
@pytest.fixture(scope='session')
def monkeypatch_sess(request):
    """Like `monkeypatch` fixture, but for sessions.

    All patches are undone in one go when the session finishes.
    """
    from _pytest import monkeypatch
    # The monkeypatch constructor grew a `request` argument in pytest 3.
    # NOTE(review): lexicographic version comparison breaks once pytest
    # reaches version 10 -- consider a parsed-version comparison.
    if pytest.__version__ < "3":
        mpatch = monkeypatch.monkeypatch()
    else:
        mpatch = monkeypatch.monkeypatch(request)
    request.addfinalizer(mpatch.undo)
    return mpatch
@pytest.fixture(scope="session", autouse=True)
def envpath_no_venv(request, monkeypatch_sess):
    """Strip virtualenv path from system environment $PATH (scope: session).

    For the test remove virtualenv path from $PATH.

    We use this fixture here to ensure that virtualenvs can be used in
    tests but do not interfere with `unoconv` path and needed libs.
    In other words: with this fixture we can run tests in Python
    versions, that normally do not support `uno` and other packages
    needed by `unoconv`.
    """
    new_path = envpath_wo_virtualenvs()
    # An empty/None result means there is nothing to strip; leave $PATH
    # untouched rather than setting it to an empty string.
    if not new_path:
        return
    monkeypatch_sess.setenv("PATH", new_path)
@pytest.fixture(scope="session")
def home(request, tmpdir_sess, monkeypatch_sess):
    """Provide a new $HOME (scope: session).

    Creates a fresh ``home`` directory under the session tmpdir and points
    the HOME environment variable at it for the whole session.
    """
    new_home = tmpdir_sess.mkdir('home')
    monkeypatch_sess.setenv('HOME', str(new_home))
    return new_home
@pytest.fixture(scope="session")
def lo_server(request, home, tmpdir_sess, envpath_no_venv):
    """Start a libre office server (scope: session).

    session-scoped test fixture. Sets new $HOME.

    If port 2002 already answers, an existing server is reused and the
    fixture returns None without starting (or stopping) anything.
    """
    if check_port("localhost", 2002):
        return
    # Launch `oooctl.py start` with the current interpreter, logging to a
    # session-scoped file.
    script_path = os.path.splitext(oooctl.__file__)[0]
    log_path = tmpdir_sess.join("loctl.log")
    cmd = "%s %s.py --stdout=%s start" % (
        sys.executable, script_path, log_path)
    # It would be nice, to work w/o shell here.
    proc = subprocess.Popen(cmd, shell=True)
    proc.wait()
    # Poll with exponential backoff until the server answers on port 2002,
    # giving up after roughly 3 seconds.
    ts = time.time()
    nap = 0.1
    while not check_port('localhost', 2002):
        time.sleep(nap)
        nap = nap * 2
        if time.time() - ts > 3:
            break

    def stop_server():
        # Mirror of the startup path: issue `oooctl.py stop` and wait
        # (again with capped exponential backoff) until the port is free.
        cmd = "%s %s.py stop" % (sys.executable, script_path)
        # It would be nice, to work w/o shell here.
        proc = subprocess.Popen(cmd, shell=True)
        proc.wait()
        ts = time.time()
        nap = 0.1
        while check_port('localhost', 2002):
            time.sleep(nap)
            nap = nap * 2
            if time.time() - ts > 3:
                break

    request.addfinalizer(stop_server)
    return proc
@pytest.fixture(scope="function")
def workdir(request, tmpdir, monkeypatch):
    """Provide a working dir (scope: function).

    Creates a temporary directory with subdirs 'src/', 'cache/', and
    'tmp/'. In 'src/sample.txt' a simple text file is created.

    The system working directory is changed to the temporary dir during
    test.

    Global root temporary dir is set to the newly created 'tmp/' dir
    during test.
    """
    tmpdir.mkdir('src')
    tmpdir.mkdir('cache')
    tmpdir.mkdir('tmp')
    tmpdir.join('src').join('sample.txt').write('Hi there!')
    monkeypatch.chdir(tmpdir)
    # Redirect tempfile's module-global default directory; monkeypatch
    # restores the original value after the test.
    monkeypatch.setattr(tempfile, 'tempdir', str(tmpdir.join('tmp')))
    return tmpdir
@pytest.fixture(scope="function")
def conv_logger(request):
    """`py.io.TextIO` stream capturing log messages (scope: function).

    Captures messages to 'ulif.openoffice.convert' logger. Text can be
    retrieved with `conv_logger.getvalue()`.
    """
    stream = TextIO()
    logger = logging.getLogger('ulif.openoffice.convert')
    # Remember the incoming level so the finalizer can restore it.
    entry_level = logger.level
    logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler(stream)

    def cleanup():
        # Undo both modifications made below: handler and level.
        logger.removeHandler(handler)
        logger.setLevel(entry_level)

    logger.addHandler(handler)
    request.addfinalizer(cleanup)
    return stream
@pytest.fixture(scope="function")
def samples_dir(request):
    """Return the local ``input`` samples directory (scope: function).

    The path is delivered as a `py.path.local` object for convenience.
    """
    input_dir = py.path.local(__file__).dirpath("input")
    assert input_dir.check()  # the samples directory must really exist
    return input_dir
@pytest.fixture(scope="function")
def conv_env(workdir, samples_dir):
    """Get the py.path local to a docconverter environment.

    A converter environment contains a `workdir` which is returned.

    The path contains additionally ``sample1.ini`` with content copied
    from local ``inputs/sample1.ini``, a cache dir named ``cache`` and a
    file ``paste.ini``, copied from ``input/sample2.ini`` and with all
    cache dir references pointing to the local cache dir.
    """
    workdir.join("sample1.ini").write(samples_dir.join("sample1.ini").read())
    # Rewrite the hard-coded cache location in the template to this test's
    # private cache directory.
    paste_conf2 = samples_dir.join("sample2.ini").read().replace(
        "/tmp/mycache", str(workdir / "cache"))
    workdir.join("paste.ini").write(paste_conf2)
    return workdir
| gpl-2.0 |
torinfs/Py_Chem | response.py | 1 | 4641 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Function to build the A and B response matrices
# Torin Stetina
# June 1st, 2017
import numpy as np
def spin_eri(eriMO, sdim):
    # ** Original algorithm based on function
    # ** from joshuagoings.com/2013/05/27/tdhf-cis-in-python/
    """Expand spatial-orbital MO integrals into antisymmetrized
    spin-orbital integrals < pq || rs > (Dirac notation).

    Spin orbital t maps to spatial orbital t//2; t%2 is its spin label.
    WARNING: converts from chemists' to Dirac notation.
    """
    seri = np.zeros((sdim, sdim, sdim, sdim))
    for p in range(sdim):
        for q in range(sdim):
            for r in range(sdim):
                for s in range(sdim):
                    # Coulomb part survives when spins match pairwise (p,r), (q,s).
                    coulomb = eriMO[p//2, r//2, q//2, s//2] * (p % 2 == r % 2) * (q % 2 == s % 2)
                    # Exchange part survives for the crossed pairing (p,s), (q,r).
                    exchange = eriMO[p//2, s//2, q//2, r//2] * (p % 2 == s % 2) * (q % 2 == r % 2)
                    seri[p, q, r, s] = coulomb - exchange
    return seri
def responseAB_RHF(eriMO, eps, Nelec, S):
    """Build the linear-response (TDHF/RPA) matrices A and B for an RHF
    reference.

    Parameters:
      eriMO -- MO-transformed two-electron integrals in chemists' notation
               (pq|rs), shape (dim, dim, dim, dim)
      eps   -- orbital energies
      Nelec -- number of electrons (assumed even in the singlet branch)
      S     -- True: singlet-only spatial-orbital formulation;
               False: spin-orbital (spin-adapted) formulation, which also
               contains the triplet excitations

    Returns the pair (A, B) as numpy arrays.
    """
    dim = len(eriMO)

    if not S:  # Spin-adapted (spin-orbital) formulation
        sdim = 2 * dim
        seri = spin_eri(eriMO, sdim)

        # Extend epsilon array over spin orbitals.
        spin_eps = np.zeros((sdim))
        for i in range(0, sdim):
            spin_eps[i] = eps[i // 2]
        spin_eps = np.diag(spin_eps)

        A = np.zeros((Nelec * (sdim - Nelec), Nelec * (sdim - Nelec)))
        B = np.zeros((Nelec * (sdim - Nelec), Nelec * (sdim - Nelec)))

        # Compute A and B matrix elements
        ia = -1
        for i in range(0, Nelec):
            for a in range(Nelec, sdim):
                ia += 1
                jb = -1
                for j in range(0, Nelec):
                    for b in range(Nelec, sdim):
                        jb += 1
                        # A = (e_a - e_i) d_{ij} d_{ab} + < aj || ib >
                        A[ia, jb] = (spin_eps[a, a] - spin_eps[i, i]) \
                            * (i == j) * (a == b) + seri[a, j, i, b]
                        # B = < ab || ij >
                        B[ia, jb] = seri[a, b, i, j]

    elif S:  # Singlets only
        # BUGFIX: use floor division.  The original ``Nelec/2`` yields a
        # float under Python 3 (or ``from __future__ import division``),
        # which breaks range() and np.zeros() shape arguments.  ``//`` is
        # identical for even ints on Python 2, so this is fully compatible.
        nocc = Nelec // 2
        A = np.zeros((nocc * (dim - nocc), nocc * (dim - nocc)))
        B = np.zeros((nocc * (dim - nocc), nocc * (dim - nocc)))

        # Compute A and B matrix elements
        ia = -1
        for i in range(0, nocc):
            for a in range(nocc, dim):
                ia += 1
                jb = -1
                for j in range(0, nocc):
                    for b in range(nocc, dim):
                        jb += 1
                        # A = (e_a - e_i) d_{ij} d_{ab} + < aj || ib >
                        # < aj || ib > = < aj | ib > - < aj | bi >
                        #             = ( ai | jb ) - ( ab | ji )
                        A[ia, jb] = (eps[a] - eps[i]) \
                            * (i == j) * (a == b) + 2*eriMO[a, i, j, b] - eriMO[a, b, j, i]
                        # B = < ab || ij >
                        # < ab || ij > = < ab | ij > - < ab | ji >
                        #             = ( ai | bj ) - ( aj | bi )
                        B[ia, jb] = 2*eriMO[a, i, b, j] - eriMO[a, j, b, i]

    return A, B
def responseAB_UHF(eriMO, eps, Nelec):
    # eriMO = MO transformed ERIs in Block form
    # eps = [eps_a, eps_b]
    # Nelec = [Na, Nb]
    """Build the spin-blocked response matrices A and B for a UHF reference.

    ``eriMO`` holds four spin blocks indexed 0..3 in (w, x) row-major
    order with 0 = alpha and 1 = beta -- presumably (ww|xx) blocks in
    chemists' notation; TODO confirm against the integral-transform code.
    ``eps`` is a pair of orbital-energy arrays and ``Nelec`` a pair of
    occupation counts, one per spin.

    Returns (A, B) assembled as 2x2 block matrices (np.bmat) over the
    alpha/beta excitation spaces.
    """
    dim = len(eriMO[0])

    # Compute A and B matrix elements
    # wx enumerates the four spin blocks: (w,x) = (0,0),(0,1),(1,0),(1,1).
    wx = -1
    block_A = []
    block_B = []
    for w in range(2):
        for x in range(2):
            wx += 1
            ia = -1
            # Rows span occupied(w) x virtual(w); columns occupied(x) x virtual(x).
            A = np.zeros((Nelec[w]*(dim-Nelec[w]), Nelec[x]*(dim-Nelec[x])))
            B = np.zeros((Nelec[w]*(dim-Nelec[w]), Nelec[x]*(dim-Nelec[x])))
            rg = [w, x]
            for i in range(0, Nelec[rg[0]]):
                for a in range(Nelec[rg[0]], dim):
                    ia += 1
                    jb = -1
                    for j in range(0, Nelec[rg[1]]):
                        for b in range(Nelec[rg[1]], dim):
                            jb += 1
                            # A = (e_a - e_i) d_{ij} d{ab} d{σσ'} + (aiσ|jbσ') - d{σσ'}(abσ|jiσ)
                            A[ia, jb] = (eps[w][a] - eps[w][i]) \
                                * (i == j) * (a == b) * (w == x) \
                                + eriMO[wx][a, i, j, b] - (w == x) * eriMO[wx][a, b, j, i]
                            # B = (aiσ|bjσ') - d{σσ'}(ajσ|biσ)
                            B[ia, jb] = eriMO[wx][a, i, b, j] - (w == x)*eriMO[wx][a, j, b, i]
            block_A.append(A)
            block_B.append(B)

    # Create full A and B block matrices
    A = np.bmat([[block_A[0], block_A[1]], [block_A[2], block_A[3]]])
    B = np.bmat([[block_B[0], block_B[1]], [block_B[2], block_B[3]]])
    return A, B
def TDHF(eriMO, eps, Nelec, R):
    """Solve the TDHF (RPA) non-Hermitian eigenproblem and print the
    positive excitation energies in eV.

    eriMO, eps and Nelec are forwarded to responseAB_UHF.  NOTE(review):
    ``R`` is never used in this function -- confirm whether it was meant
    to select a restricted path and either wire it up or drop it.

    Returns None; results are printed (Python 2 print statements).
    """
    # Get A and B matrices
    A, B = responseAB_UHF(eriMO, eps, Nelec)

    # Solve non-Hermetian eigenvalue problem
    M = np.bmat([[A, B], [-B, -A]])
    # NOTE(review): eig of this non-symmetric matrix can return complex
    # eigenvalues for unstable references; the comparison and sort below
    # assume a purely real spectrum -- confirm for the intended inputs.
    E_td, C_td = np.linalg.eig(M)

    # The RPA spectrum comes in +/- pairs; keep only the positive roots.
    Energies = []
    print 'Excitation Energies (TDHF) = '
    for i in range(len(E_td)):
        if E_td[i] > 0.00:
            Energies.append(E_td[i])
    Energies = sorted(Energies)
    for i in range(len(Energies)):
        # 27.211396132 = hartree-to-eV conversion factor
        print 27.211396132*Energies[i], 'eV'
| gpl-3.0 |
2014c2g5/2014c2 | exts/wsgi/static/Brython2.1.0-20140419-113919/Lib/xml/dom/xmlbuilder.py | 873 | 12377 | """Implementation of the DOM Level 3 'LS-Load' feature."""
import copy
import xml.dom
from xml.dom.NodeFilter import NodeFilter
__all__ = ["DOMBuilder", "DOMEntityResolver", "DOMInputSource"]
class Options:
    """Features object that has variables set for each DOMBuilder feature.

    The DOMBuilder class uses an instance of this class to pass settings to
    the ExpatBuilder class.
    """

    # Note that the DOMBuilder class in LoadSave constrains which of these
    # values can be set using the DOM Level 3 LoadSave feature.

    # Parsing behavior toggles (defaults mirror the DOM L3 LS defaults).
    namespaces = 1
    namespace_declarations = True
    validation = False
    external_parameter_entities = True
    external_general_entities = True
    external_dtd_subset = True
    validate_if_schema = False
    validate = False
    datatype_normalization = False
    create_entity_ref_nodes = True
    entities = True
    whitespace_in_element_content = True
    cdata_sections = True
    comments = True
    charset_overrides_xml_encoding = True
    infoset = False
    supported_mediatypes_only = False

    # Hooks copied in from the DOMBuilder at parse time; None = unset.
    errorHandler = None
    filter = None
class DOMBuilder:
    """DOM Level 3 "LS-Load" parser front-end.

    Features are configured through setFeature()/getFeature(); parsing is
    delegated to xml.dom.expatbuilder, driven by an Options instance built
    from the configured features.
    """

    entityResolver = None
    errorHandler = None
    filter = None

    # Context-insertion actions for parseWithContext().
    ACTION_REPLACE = 1
    ACTION_APPEND_AS_CHILDREN = 2
    ACTION_INSERT_AFTER = 3
    ACTION_INSERT_BEFORE = 4

    _legal_actions = (ACTION_REPLACE, ACTION_APPEND_AS_CHILDREN,
                      ACTION_INSERT_AFTER, ACTION_INSERT_BEFORE)

    def __init__(self):
        self._options = Options()

    # DOM-style accessor pairs for the three pluggable hooks.
    def _get_entityResolver(self):
        return self.entityResolver
    def _set_entityResolver(self, entityResolver):
        self.entityResolver = entityResolver

    def _get_errorHandler(self):
        return self.errorHandler
    def _set_errorHandler(self, errorHandler):
        self.errorHandler = errorHandler

    def _get_filter(self):
        return self.filter
    def _set_filter(self, filter):
        self.filter = filter

    def setFeature(self, name, state):
        # Apply every (option, value) pair registered for this
        # (feature, state) combination in _settings.
        if self.supportsFeature(name):
            state = state and 1 or 0
            try:
                settings = self._settings[(_name_xform(name), state)]
            except KeyError:
                raise xml.dom.NotSupportedErr(
                    "unsupported feature: %r" % (name,))
            else:
                # NOTE(review): this loop rebinds the parameter ``name``;
                # harmless here since it is not used afterwards.
                for name, value in settings:
                    setattr(self._options, name, value)
        else:
            raise xml.dom.NotFoundErr("unknown feature: " + repr(name))

    def supportsFeature(self, name):
        # A feature is supported iff Options defines a matching attribute.
        return hasattr(self._options, _name_xform(name))

    def canSetFeature(self, name, state):
        key = (_name_xform(name), state and 1 or 0)
        return key in self._settings

    # This dictionary maps from (feature,value) to a list of
    # (option,value) pairs that should be set on the Options object.
    # If a (feature,value) setting is not in this dictionary, it is
    # not supported by the DOMBuilder.
    #
    _settings = {
        ("namespace_declarations", 0): [
            ("namespace_declarations", 0)],
        ("namespace_declarations", 1): [
            ("namespace_declarations", 1)],
        ("validation", 0): [
            ("validation", 0)],
        ("external_general_entities", 0): [
            ("external_general_entities", 0)],
        ("external_general_entities", 1): [
            ("external_general_entities", 1)],
        ("external_parameter_entities", 0): [
            ("external_parameter_entities", 0)],
        ("external_parameter_entities", 1): [
            ("external_parameter_entities", 1)],
        ("validate_if_schema", 0): [
            ("validate_if_schema", 0)],
        ("create_entity_ref_nodes", 0): [
            ("create_entity_ref_nodes", 0)],
        ("create_entity_ref_nodes", 1): [
            ("create_entity_ref_nodes", 1)],
        ("entities", 0): [
            ("create_entity_ref_nodes", 0),
            ("entities", 0)],
        ("entities", 1): [
            ("entities", 1)],
        ("whitespace_in_element_content", 0): [
            ("whitespace_in_element_content", 0)],
        ("whitespace_in_element_content", 1): [
            ("whitespace_in_element_content", 1)],
        ("cdata_sections", 0): [
            ("cdata_sections", 0)],
        ("cdata_sections", 1): [
            ("cdata_sections", 1)],
        ("comments", 0): [
            ("comments", 0)],
        ("comments", 1): [
            ("comments", 1)],
        ("charset_overrides_xml_encoding", 0): [
            ("charset_overrides_xml_encoding", 0)],
        ("charset_overrides_xml_encoding", 1): [
            ("charset_overrides_xml_encoding", 1)],
        ("infoset", 0): [],
        ("infoset", 1): [
            ("namespace_declarations", 0),
            ("validate_if_schema", 0),
            ("create_entity_ref_nodes", 0),
            ("entities", 0),
            ("cdata_sections", 0),
            ("datatype_normalization", 1),
            ("whitespace_in_element_content", 1),
            ("comments", 1),
            ("charset_overrides_xml_encoding", 1)],
        ("supported_mediatypes_only", 0): [
            ("supported_mediatypes_only", 0)],
        ("namespaces", 0): [
            ("namespaces", 0)],
        ("namespaces", 1): [
            ("namespaces", 1)],
    }

    def getFeature(self, name):
        xname = _name_xform(name)
        try:
            return getattr(self._options, xname)
        except AttributeError:
            # "infoset" is not stored directly: it reads as true only when
            # the exact combination mandated by the spec is in effect.
            if name == "infoset":
                options = self._options
                return (options.datatype_normalization
                        and options.whitespace_in_element_content
                        and options.comments
                        and options.charset_overrides_xml_encoding
                        and not (options.namespace_declarations
                                 or options.validate_if_schema
                                 or options.create_entity_ref_nodes
                                 or options.entities
                                 or options.cdata_sections))
            raise xml.dom.NotFoundErr("feature %s not known" % repr(name))

    def parseURI(self, uri):
        # Resolve *uri* through the configured entityResolver (or a default
        # DOMEntityResolver) and parse the resulting input source.
        if self.entityResolver:
            input = self.entityResolver.resolveEntity(None, uri)
        else:
            input = DOMEntityResolver().resolveEntity(None, uri)
        return self.parse(input)

    def parse(self, input):
        # Work on a copy of the options so this parse does not mutate the
        # builder's stored configuration.
        options = copy.copy(self._options)
        options.filter = self.filter
        options.errorHandler = self.errorHandler
        fp = input.byteStream
        # NOTE(review): ``options.systemId`` -- Options defines no systemId
        # attribute, so this looks like it was meant to be input.systemId;
        # it is only reached when byteStream is None.  Confirm upstream.
        if fp is None and options.systemId:
            import urllib.request
            fp = urllib.request.urlopen(input.systemId)
        return self._parse_bytestream(fp, options)

    def parseWithContext(self, input, cnode, action):
        if action not in self._legal_actions:
            raise ValueError("not a legal action")
        raise NotImplementedError("Haven't written this yet...")

    def _parse_bytestream(self, stream, options):
        import xml.dom.expatbuilder
        builder = xml.dom.expatbuilder.makeBuilder(options)
        return builder.parseFile(stream)
def _name_xform(name):
return name.lower().replace('-', '_')
class DOMEntityResolver(object):
    """Default resolver: opens systemIds with urllib and wraps the result
    in a DOMInputSource."""

    __slots__ = '_opener',

    def resolveEntity(self, publicId, systemId):
        """Open *systemId* and return a populated DOMInputSource."""
        assert systemId is not None
        source = DOMInputSource()
        source.publicId = publicId
        source.systemId = systemId
        source.byteStream = self._get_opener().open(systemId)

        # determine the encoding if the transport provided it
        source.encoding = self._guess_media_encoding(source)

        # determine the base URI is we can
        import posixpath, urllib.parse
        parts = urllib.parse.urlparse(systemId)
        scheme, netloc, path, params, query, fragment = parts
        # XXX should we check the scheme here as well?
        if path and not path.endswith("/"):
            path = posixpath.dirname(path) + "/"
        parts = scheme, netloc, path, params, query, fragment
        source.baseURI = urllib.parse.urlunparse(parts)

        return source

    def _get_opener(self):
        # Lazily create and cache a urllib opener on first use.
        try:
            return self._opener
        except AttributeError:
            self._opener = self._create_opener()
            return self._opener

    def _create_opener(self):
        import urllib.request
        return urllib.request.build_opener()

    def _guess_media_encoding(self, source):
        """Return the charset declared by the transport, or None.

        BUGFIX: the original code called ``info.getplist()``, a Python 2
        mimetools method that does not exist on the email.message.Message
        objects urllib returns in Python 3, so any response carrying a
        Content-Type header raised AttributeError.  Message.get_content_charset()
        performs the same lookup (and already lowercases the value).
        """
        info = source.byteStream.info()
        if "Content-Type" in info:
            return info.get_content_charset()
        return None
class DOMInputSource(object):
    """Value object describing one XML input (DOM Level 3 LS).

    Every attribute starts out as None; the paired _get_*/_set_* methods
    back the DOM property protocol.
    """

    __slots__ = ('byteStream', 'characterStream', 'stringData',
                 'encoding', 'publicId', 'systemId', 'baseURI')

    def __init__(self):
        # Initialize every slot to "unset".
        for attr in self.__slots__:
            setattr(self, attr, None)

    def _get_byteStream(self):
        return self.byteStream
    def _set_byteStream(self, byteStream):
        self.byteStream = byteStream

    def _get_characterStream(self):
        return self.characterStream
    def _set_characterStream(self, characterStream):
        self.characterStream = characterStream

    def _get_stringData(self):
        return self.stringData
    def _set_stringData(self, data):
        self.stringData = data

    def _get_encoding(self):
        return self.encoding
    def _set_encoding(self, encoding):
        self.encoding = encoding

    def _get_publicId(self):
        return self.publicId
    def _set_publicId(self, publicId):
        self.publicId = publicId

    def _get_systemId(self):
        return self.systemId
    def _set_systemId(self, systemId):
        self.systemId = systemId

    def _get_baseURI(self):
        return self.baseURI
    def _set_baseURI(self, uri):
        self.baseURI = uri
class DOMBuilderFilter:
    """Element filter which can be used to tailor construction of
    a DOM instance.
    """

    # There's really no need for this class; concrete implementations
    # should just implement the endElement() and startElement()
    # methods as appropriate.  Using this makes it easy to only
    # implement one of them.

    # Return codes a filter may produce for each node offered to it.
    FILTER_ACCEPT = 1
    FILTER_REJECT = 2
    FILTER_SKIP = 3
    FILTER_INTERRUPT = 4

    # Bitmask of node types the filter wants to see; default is everything.
    whatToShow = NodeFilter.SHOW_ALL

    def _get_whatToShow(self):
        return self.whatToShow

    def acceptNode(self, element):
        # Default: keep every completed node.
        return self.FILTER_ACCEPT

    def startContainer(self, element):
        # Default: descend into every container node.
        return self.FILTER_ACCEPT
# NodeFilter was only needed to seed DOMBuilderFilter.whatToShow; drop it
# from the module namespace.
del NodeFilter
class DocumentLS:
    """Mixin to create documents that conform to the load/save spec."""

    # BUGFIX: ``async`` became a reserved keyword in Python 3.7, so the
    # original ``async = False`` attribute (and the ``async`` parameter of
    # _set_async) made this whole module a SyntaxError.  Following CPython's
    # own fix (bpo-30730), the attribute is spelled ``async_``; callers that
    # read ``doc.async`` must use ``doc.async_``.
    async_ = False

    def _get_async(self):
        # Asynchronous loading is never supported, so this is always False.
        return False

    def _set_async(self, flag):
        if flag:
            raise xml.dom.NotSupportedErr(
                "asynchronous document loading is not supported")

    def abort(self):
        # What does it mean to "clear" a document?  Does the
        # documentElement disappear?
        raise NotImplementedError(
            "haven't figured out what this means yet")

    def load(self, uri):
        raise NotImplementedError("haven't written this yet")

    def loadXML(self, source):
        raise NotImplementedError("haven't written this yet")

    def saveXML(self, snode):
        """Serialize *snode* (or the whole document when None) to XML."""
        if snode is None:
            snode = self
        elif snode.ownerDocument is not self:
            raise xml.dom.WrongDocumentErr()
        return snode.toxml()
class DOMImplementationLS:
    """Factory mixin for the DOM Level 3 load/save objects."""

    MODE_SYNCHRONOUS = 1
    MODE_ASYNCHRONOUS = 2

    def createDOMBuilder(self, mode, schemaType):
        """Return a synchronous DOMBuilder; other configurations raise."""
        if schemaType is not None:
            raise xml.dom.NotSupportedErr(
                "schemaType not yet supported")
        if mode == self.MODE_ASYNCHRONOUS:
            raise xml.dom.NotSupportedErr(
                "asynchronous builders are not supported")
        if mode != self.MODE_SYNCHRONOUS:
            raise ValueError("unknown value for mode")
        return DOMBuilder()

    def createDOMWriter(self):
        raise NotImplementedError(
            "the writer interface hasn't been written yet!")

    def createDOMInputSource(self):
        return DOMInputSource()
| gpl-2.0 |
xodus7/tensorflow | tensorflow/contrib/opt/python/training/elastic_average_optimizer_test.py | 25 | 11942 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ElasticAverageOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import portpicker
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import device_setter
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver
from tensorflow.python.training import server_lib
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.contrib.opt.python.training.elastic_average_optimizer import \
ElasticAverageOptimizer, ElasticAverageCustomGetter, GLOBAL_VARIABLE_NAME
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
  """Create local GRPC servers and return them.

  Returns a tuple of (cluster spec dict, worker servers, ps servers),
  each server already started on a freshly picked free port.
  """
  worker_addresses = ["localhost:%s" % portpicker.pick_unused_port()
                      for _ in range(num_workers)]
  ps_addresses = ["localhost:%s" % portpicker.pick_unused_port()
                  for _ in range(num_ps)]
  cluster_dict = {"worker": worker_addresses, "ps": ps_addresses}
  cs = server_lib.ClusterSpec(cluster_dict)

  workers = [
      server_lib.Server(
          cs, job_name="worker", protocol=protocol, task_index=ix, start=True)
      for ix in range(num_workers)
  ]
  ps_servers = [
      server_lib.Server(
          cs, job_name="ps", protocol=protocol, task_index=ix, start=True)
      for ix in range(num_ps)
  ]

  return cluster_dict, workers, ps_servers
# Creates the workers and return their sessions, graphs, train_ops.
# Chief worker will update at last
def _get_workers(num_workers, period, workers, moving_rate, num_ps=1):
  """Build one graph + MonitoredTrainingSession per worker.

  Each worker graph holds local variables v0/v1 (plus a partitioned
  variable when num_ps > 1), constant gradients of -1, and an
  ElasticAverageOptimizer wrapping plain SGD.  Returns the parallel lists
  (sessions, graphs, train_ops, savers), indexed by worker id.
  """
  sessions = []
  graphs = []
  train_ops = []
  savers = []
  for worker_id in range(num_workers):
    graph = ops.Graph()
    is_chief = (worker_id == 0)
    with graph.as_default():
      worker_device = "/job:worker/task:%d/cpu:0" % (worker_id)
      ea_custom = ElasticAverageCustomGetter(worker_device=worker_device)
      # Local variables live on the worker; the custom getter mirrors them
      # as global (center) copies on the ps device.
      with variable_scope.variable_scope(
          "", custom_getter=ea_custom), ops.device(
              device_setter.replica_device_setter(
                  worker_device=worker_device,
                  ps_device="/job:ps/task:0/cpu:0",
                  ps_tasks=1)):
        global_step = training_util.get_or_create_global_step()
        var_0 = variable_scope.get_variable(initializer=0.0, name="v0")
        var_1 = variable_scope.get_variable(initializer=1.0, name="v1")
      if num_ps > 1:
        # Additionally exercise a variable partitioned across the ps tasks.
        with variable_scope.variable_scope(
            "",
            partitioner=partitioned_variables.fixed_size_partitioner(
                num_ps, axis=0),
            custom_getter=ea_custom), ops.device(
                device_setter.replica_device_setter(
                    worker_device=worker_device,
                    ps_device="/job:ps/task:0/cpu:0",
                    ps_tasks=num_ps)):

          partition_var = variable_scope.get_variable(
              'partition_var',
              shape=[2, 4],
              initializer=init_ops.ones_initializer)
          part_0 = list(partition_var)[0]
          part_1 = list(partition_var)[1]

      with ops.device("/job:worker/task:" + str(worker_id)):
        # Fixed gradients of -1 make every SGD step add exactly +1.
        grads_0 = constant_op.constant(-1.0)
        grads_1 = constant_op.constant(-1.0)
        grads_part_0 = constant_op.constant([[-1., -1., -1., -1.]])
        grads_part_1 = constant_op.constant([[-1., -1., -1., -1.]])

        sgd_opt = gradient_descent.GradientDescentOptimizer(1.0)
        opt = ElasticAverageOptimizer(
            opt=sgd_opt,
            num_worker=num_workers,
            moving_rate=moving_rate,
            communication_period=period,
            ea_custom_getter=ea_custom)
        if num_ps == 1:
          train_op = [
              opt.apply_gradients(([grads_0, var_0], [grads_1, var_1]),
                                  global_step)
          ]
        else:
          train_op = [
              opt.apply_gradients(([grads_0, var_0],
                                   [grads_1, var_1],
                                   [grads_part_0, part_0],
                                   [grads_part_1, part_1]),
                                  global_step)
          ]
        easgd_hook = opt.make_session_run_hook(is_chief, worker_id)
        # The swapping saver checkpoints the global (center) values in
        # place of the local ones.
        saver = opt.swapping_saver()
      # Creates MonitoredSession
      sess = training.MonitoredTrainingSession(
          workers[worker_id].target, hooks=[easgd_hook])

    sessions.append(sess)
    graphs.append(graph)
    train_ops.append(train_op)
    savers.append(saver)
  return sessions, graphs, train_ops, savers
class ElasticAverageOptimizerTest(test.TestCase):
  """End-to-end tests of ElasticAverageOptimizer on an in-process cluster."""

  def _run(self, train_op, sess):
    # Helper: execute one training step in the given session.
    sess.run(train_op)

  def test1Workers2Period(self):
    """One worker, communication period 2: local steps must not touch the
    global (center) variables until the period elapses, and the swapping
    saver must checkpoint the global values."""
    num_workers = 1
    communication_period = 2
    num_ps = 1
    cluster, workers, _ = create_local_cluster(
        num_workers=num_workers, num_ps=num_ps)

    sessions, graphs, train_ops, savers = _get_workers(
        num_workers, communication_period, workers, 1.0)

    var_0 = graphs[0].get_tensor_by_name("v0:0")
    var_1 = graphs[0].get_tensor_by_name("v1:0")
    global_step = training_util.get_global_step(graphs[0])
    var_0_g = graphs[0].get_tensor_by_name(GLOBAL_VARIABLE_NAME + "/v0:0")
    var_1_g = graphs[0].get_tensor_by_name(GLOBAL_VARIABLE_NAME + "/v1:0")

    # Verify the initialized value.
    self.assertAllEqual(0.0, sessions[0].run(var_0))
    self.assertAllEqual(1.0, sessions[0].run(var_1))
    self.assertAllEqual(0.0, sessions[0].run(var_0_g))
    self.assertAllEqual(1.0, sessions[0].run(var_1_g))
    self.assertAllEqual(0, sessions[0].run(global_step))

    # Iteration 1: only locals move (period not yet reached).
    sessions[0].run(train_ops[0])

    self.assertAllEqual(1.0, sessions[0].run(var_0))
    self.assertAllEqual(2.0, sessions[0].run(var_1))
    self.assertAllEqual(0.0, sessions[0].run(var_0_g))
    self.assertAllEqual(1.0, sessions[0].run(var_1_g))
    self.assertAllEqual(0, sessions[0].run(global_step))

    # iteration 2, global variable update
    sessions[0].run(train_ops[0])

    self.assertAllEqual(0.0, sessions[0].run(var_0))
    self.assertAllEqual(1.0, sessions[0].run(var_1))
    self.assertAllEqual(2.0, sessions[0].run(var_0_g))
    self.assertAllEqual(3.0, sessions[0].run(var_1_g))
    self.assertAllEqual(1, sessions[0].run(global_step))

    # iteration 3
    sessions[0].run(train_ops[0])

    self.assertAllEqual(1.0, sessions[0].run(var_0))
    self.assertAllEqual(2.0, sessions[0].run(var_1))
    self.assertAllEqual(2.0, sessions[0].run(var_0_g))
    self.assertAllEqual(3.0, sessions[0].run(var_1_g))
    self.assertAllEqual(1, sessions[0].run(global_step))
    sessions[0].run(train_ops[0])

    # save, data will be global value
    outfile = os.path.join(test.get_temp_dir(), "model")
    # NOTE: reaches through the MonitoredSession wrapper chain to the raw
    # tf.Session that Saver.save() requires.
    savers[0].save(sessions[0]._sess._sess._sess._sess,
                   save_path=outfile)
    ops.reset_default_graph()  # restore on a new graph
    with session.Session() as sess:
      v0 = variable_scope.get_variable(initializer=0.0, name="v0")
      v1 = variable_scope.get_variable(initializer=1.0, name="v1")
      sess.run(variables.local_variables_initializer())
      saver_opt = saver.Saver(var_list=[v1, v0])
      saver_opt.restore(sess, outfile)
      self.assertAllEqual(2.0, sess.run(v0))
      self.assertAllEqual(3.0, sess.run(v1))

  def test2Worker1Period(self):
    """Two workers, period 1: every step synchronizes with the center
    variables (moving_rate 0.5); also exercises partitioned variables
    across two ps tasks and their save/restore round trip."""
    num_workers = 2
    communication_period = 1
    num_ps = 2
    cluster, workers, _ = create_local_cluster(
        num_workers=num_workers, num_ps=num_ps)

    sessions, graphs, train_ops, savers = _get_workers(
        num_workers, communication_period, workers, 0.5, num_ps=2)

    var_0 = graphs[0].get_tensor_by_name("v0:0")
    var_1 = graphs[0].get_tensor_by_name("v1:0")

    var_0_1 = graphs[1].get_tensor_by_name("v0:0")
    var_1_1 = graphs[1].get_tensor_by_name("v1:0")

    var_0_g = graphs[0].get_tensor_by_name(GLOBAL_VARIABLE_NAME + "/v0:0")
    var_1_g = graphs[0].get_tensor_by_name(GLOBAL_VARIABLE_NAME + "/v1:0")
    part_0_g = graphs[0].get_tensor_by_name(
        GLOBAL_VARIABLE_NAME + "/partition_var/part_0:0")

    # Verify the initialized value.
    self.assertAllEqual(0.0, sessions[0].run(var_0))
    self.assertAllEqual(1.0, sessions[0].run(var_1))
    self.assertAllEqual(0.0, sessions[1].run(var_0_1))
    self.assertAllEqual(1.0, sessions[1].run(var_1_1))
    self.assertAllEqual(0.0, sessions[0].run(var_0_g))
    self.assertAllEqual(1.0, sessions[0].run(var_1_g))

    sessions[0].run(train_ops[0])
    sessions[1].run(train_ops[1])

    self.assertAllEqual(0.5, sessions[0].run(var_0))
    self.assertAllEqual(1.5, sessions[0].run(var_1))
    self.assertAllEqual(0.75, sessions[0].run(var_0_g))
    self.assertAllEqual(1.75, sessions[0].run(var_1_g))
    self.assertAllEqual(0.75, sessions[1].run(var_0_1))
    self.assertAllEqual(1.75, sessions[1].run(var_1_1))
    # part_0 of global_center copy
    part_0_g = sessions[0].run(part_0_g)

    outfile = os.path.join(test.get_temp_dir(), "model")
    savers[0].save(sessions[0]._sess._sess._sess._sess,
                   save_path=outfile)

    # verify restore of partitioned_variables
    ops.reset_default_graph()  # restore on a new graph
    g = ops.get_default_graph()
    with session.Session() as sess, g.as_default():
      with variable_scope.variable_scope(
          "",
          partitioner=partitioned_variables.fixed_size_partitioner(
              num_ps, axis=0)):
        partition_var = variable_scope.get_variable(
            'partition_var',
            shape=[2, 4],
            initializer=init_ops.ones_initializer)
      s = saver.Saver(var_list=[partition_var])
      s.restore(sess, outfile)
      part_0 = g.get_tensor_by_name('partition_var/part_0:0')
      self.assertAllEqual(part_0_g, sess.run(part_0))

  def testPS2TasksWithClusterSpecClass(self):
    """The custom getter must keep local variables on the worker and place
    their global (center) copies round-robin across the ps tasks."""
    cluster_spec = server_lib.ClusterSpec({
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
    })
    ea_custom = ElasticAverageCustomGetter(worker_device="/job:worker/task:0")
    from tensorflow.python.training import device_setter
    with ops.device(
        device_setter.replica_device_setter(cluster=cluster_spec,
                                            worker_device="/job:worker/task:0",
                                            ps_device="/job:ps")), \
        variable_scope.variable_scope("", custom_getter=ea_custom):
      v = variable_scope.get_variable(initializer=[1, 2], name="v")
      w = variable_scope.get_variable(initializer=[2, 1], name="w")
      v_g, w_g = ea_custom._global_map[v], ea_custom._global_map[w]
      self.assertDeviceEqual("/job:worker/task:0", v.device)
      self.assertDeviceEqual("job:ps/task:0", v_g.device)
      self.assertDeviceEqual("/job:worker/task:0", w.device)
      self.assertDeviceEqual("job:ps/task:1", w_g.device)
if __name__ == "__main__":
test.main()
| apache-2.0 |
indictranstech/internal-erpnext | erpnext/hr/report/monthly_salary_register/monthly_salary_register.py | 52 | 4104 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr
from frappe import msgprint, _
def execute(filters=None):
    """Report entry point: return (columns, data) for the Monthly Salary
    Register, one row per submitted salary slip."""
    if not filters:
        filters = {}

    salary_slips = get_salary_slips(filters)
    columns, earning_types, ded_types = get_columns(salary_slips)
    ss_earning_map = get_ss_earning_map(salary_slips)
    ss_ded_map = get_ss_ded_map(salary_slips)

    data = []
    for ss in salary_slips:
        # Per-slip earning/deduction amounts, in the same order as the
        # dynamically generated columns.
        earnings = [ss_earning_map.get(ss.name, {}).get(e) for e in earning_types]
        deductions = [ss_ded_map.get(ss.name, {}).get(d) for d in ded_types]
        row = ([ss.employee, ss.employee_name, ss.branch, ss.department,
                ss.designation, ss.company, ss.month, ss.leave_withut_pay,
                ss.payment_days]
               + earnings
               + [ss.arrear_amount, ss.leave_encashment_amount, ss.gross_pay]
               + deductions
               + [ss.total_deduction, ss.net_pay])
        data.append(row)

    return columns, data
def get_columns(salary_slips):
    """Build the report column definitions plus the distinct earning and
    deduction type names (with non-zero modified amount) found on the
    given slips.

    Returns (columns, earning_types, ded_types); the dynamic type columns
    are inserted between the fixed employee columns and the totals.
    """
    columns = [
        _("Employee") + ":Link/Employee:120", _("Employee Name") + "::140", _("Branch") + ":Link/Branch:120",
        _("Department") + ":Link/Department:120", _("Designation") + ":Link/Designation:120",
        _("Company") + ":Link/Company:120", _("Month") + "::80", _("Leave Without Pay") + ":Float:130",
        _("Payment Days") + ":Float:120"
    ]

    # One extra Currency column per distinct earning / deduction type.
    earning_types = frappe.db.sql_list("""select distinct e_type from `tabSalary Slip Earning`
        where ifnull(e_modified_amount, 0) != 0 and parent in (%s)""" %
        (', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]))

    ded_types = frappe.db.sql_list("""select distinct d_type from `tabSalary Slip Deduction`
        where ifnull(d_modified_amount, 0) != 0 and parent in (%s)""" %
        (', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]))

    columns = columns + [(e + ":Currency:120") for e in earning_types] + \
        ["Arrear Amount:Currency:120", "Leave Encashment Amount:Currency:150",
        "Gross Pay:Currency:120"] + [(d + ":Currency:120") for d in ded_types] + \
        ["Total Deduction:Currency:120", "Net Pay:Currency:120"]

    return columns, earning_types, ded_types
def get_salary_slips(filters):
    """Fetch all submitted (docstatus = 1) Salary Slips matching *filters*.

    Aborts with a translated message (msgprint raise_exception=1) when no
    slip matches the requested month / fiscal year.
    """
    conditions, filters = get_conditions(filters)
    salary_slips = frappe.db.sql("""select * from `tabSalary Slip` where docstatus = 1 %s
        order by employee, month""" % conditions, filters, as_dict=1)

    if not salary_slips:
        msgprint(_("No salary slip found for month: ") + cstr(filters.get("month")) +
            _(" and year: ") + cstr(filters.get("fiscal_year")), raise_exception=1)

    return salary_slips
def get_conditions(filters):
    """Translate the report filters into an SQL condition suffix.

    Note: mutates *filters* in place, replacing the month name with its
    1-based number so the dict can be used directly as query parameters.
    Returns (conditions, filters).
    """
    conditions = ""
    if filters.get("month"):
        month_names = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul",
                       "Aug", "Sep", "Oct", "Nov", "Dec"]
        filters["month"] = month_names.index(filters["month"]) + 1
        conditions += " and month = %(month)s"

    # Simple equality filters, appended in a fixed order.
    for key in ("fiscal_year", "company", "employee"):
        if filters.get(key):
            conditions += " and %s = %%(%s)s" % (key, key)

    return conditions, filters
def get_ss_earning_map(salary_slips):
    """Map salary slip name -> {earning type: modified amount} for the
    given slips."""
    ss_earnings = frappe.db.sql("""select parent, e_type, e_modified_amount
        from `tabSalary Slip Earning` where parent in (%s)""" %
        (', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)

    ss_earning_map = {}
    for d in ss_earnings:
        # Cleanup: the old chained ``.setdefault(d.e_type, [])`` created a
        # throwaway list that the next line immediately overwrote.
        ss_earning_map.setdefault(d.parent, frappe._dict())
        ss_earning_map[d.parent][d.e_type] = flt(d.e_modified_amount)

    return ss_earning_map
def get_ss_ded_map(salary_slips):
    """Map salary slip name -> {deduction type: modified amount} for the
    given slips."""
    ss_deductions = frappe.db.sql("""select parent, d_type, d_modified_amount
        from `tabSalary Slip Deduction` where parent in (%s)""" %
        (', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)

    ss_ded_map = {}
    for d in ss_deductions:
        # Cleanup: the old chained ``.setdefault(d.d_type, [])`` created a
        # throwaway list that the next line immediately overwrote.
        ss_ded_map.setdefault(d.parent, frappe._dict())
        ss_ded_map[d.parent][d.d_type] = flt(d.d_modified_amount)

    return ss_ded_map
proxysh/Safejumper-for-Desktop | buildlinux/env64/lib/python2.7/site-packages/twisted/trial/_dist/disttrial.py | 12 | 8711 | # -*- test-case-name: twisted.trial._dist.test.test_disttrial -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module contains the trial distributed runner, the management class
responsible for coordinating all of trial's behavior at the highest level.
@since: 12.3
"""
import os
import sys
from twisted.python.filepath import FilePath
from twisted.python.modules import theSystemPath
from twisted.internet.defer import DeferredList
from twisted.internet.task import cooperate
from twisted.trial.util import _unusedTestDirectory
from twisted.trial._asyncrunner import _iterateTests
from twisted.trial._dist.worker import LocalWorker, LocalWorkerAMP
from twisted.trial._dist.distreporter import DistReporter
from twisted.trial.reporter import UncleanWarningsReporterWrapper
from twisted.trial._dist import _WORKER_AMP_STDIN, _WORKER_AMP_STDOUT
class DistTrialRunner(object):
"""
A specialized runner for distributed trial. The runner launches a number of
local worker processes which will run tests.
@ivar _workerNumber: the number of workers to be spawned.
@type _workerNumber: C{int}
@ivar _stream: stream which the reporter will use.
@ivar _reporterFactory: the reporter class to be used.
"""
_distReporterFactory = DistReporter
def _makeResult(self):
"""
Make reporter factory, and wrap it with a L{DistReporter}.
"""
reporter = self._reporterFactory(self._stream, self._tbformat,
realtime=self._rterrors)
if self._uncleanWarnings:
reporter = UncleanWarningsReporterWrapper(reporter)
return self._distReporterFactory(reporter)
def __init__(self, reporterFactory, workerNumber, workerArguments,
stream=None,
tracebackFormat='default',
realTimeErrors=False,
uncleanWarnings=False,
logfile='test.log',
workingDirectory='_trial_temp'):
self._workerNumber = workerNumber
self._workerArguments = workerArguments
self._reporterFactory = reporterFactory
if stream is None:
stream = sys.stdout
self._stream = stream
self._tbformat = tracebackFormat
self._rterrors = realTimeErrors
self._uncleanWarnings = uncleanWarnings
self._result = None
self._workingDirectory = workingDirectory
self._logFile = logfile
self._logFileObserver = None
self._logFileObject = None
self._logWarnings = False
def writeResults(self, result):
"""
Write test run final outcome to result.
@param result: A C{TestResult} which will print errors and the summary.
"""
result.done()
def createLocalWorkers(self, protocols, workingDirectory):
"""
Create local worker protocol instances and return them.
@param protocols: An iterable of L{LocalWorkerAMP} instances.
@param workingDirectory: The base path in which we should run the
workers.
@type workingDirectory: C{str}
@return: A list of C{quantity} C{LocalWorker} instances.
"""
return [LocalWorker(protocol,
os.path.join(workingDirectory, str(x)),
self._logFile)
for x, protocol in enumerate(protocols)]
def launchWorkerProcesses(self, spawner, protocols, arguments):
"""
Spawn processes from a list of process protocols.
@param spawner: A C{IReactorProcess.spawnProcess} implementation.
@param protocols: An iterable of C{ProcessProtocol} instances.
@param arguments: Extra arguments passed to the processes.
"""
workertrialPath = theSystemPath[
'twisted.trial._dist.workertrial'].filePath.path
childFDs = {0: 'w', 1: 'r', 2: 'r', _WORKER_AMP_STDIN: 'w',
_WORKER_AMP_STDOUT: 'r'}
environ = os.environ.copy()
# Add an environment variable containing the raw sys.path, to be used by
# subprocesses to make sure it's identical to the parent. See
# workertrial._setupPath.
environ['TRIAL_PYTHONPATH'] = os.pathsep.join(sys.path)
for worker in protocols:
args = [sys.executable, workertrialPath]
args.extend(arguments)
spawner(worker, sys.executable, args=args, childFDs=childFDs,
env=environ)
def _driveWorker(self, worker, result, testCases, cooperate):
"""
Drive a L{LocalWorkerAMP} instance, iterating the tests and calling
C{run} for every one of them.
@param worker: The L{LocalWorkerAMP} to drive.
@param result: The global L{DistReporter} instance.
@param testCases: The global list of tests to iterate.
@param cooperate: The cooperate function to use, to be customized in
tests.
@type cooperate: C{function}
@return: A C{Deferred} firing when all the tests are finished.
"""
def resultErrback(error, case):
result.original.addFailure(case, error)
return error
def task(case):
d = worker.run(case, result)
d.addErrback(resultErrback, case)
return d
return cooperate(task(case) for case in testCases).whenDone()
    def run(self, suite, reactor=None, cooperate=cooperate,
            untilFailure=False):
        """
        Spawn local worker processes and load tests. After that, run them.
        @param suite: A tests suite to be run.
        @param reactor: The reactor to use, to be customized in tests.
        @type reactor: A provider of
            L{twisted.internet.interfaces.IReactorProcess}
        @param cooperate: The cooperate function to use, to be customized in
            tests.
        @type cooperate: C{function}
        @param untilFailure: If C{True}, continue to run the tests until they
            fail.
        @type untilFailure: C{bool}.
        @return: The test result.
        @rtype: L{DistReporter}
        """
        if reactor is None:
            from twisted.internet import reactor
        result = self._makeResult()
        count = suite.countTestCases()
        self._stream.write("Running %d tests.\n" % (count,))
        if not count:
            # Take a shortcut if there is no test
            suite.run(result.original)
            self.writeResults(result)
            return result
        # Acquire a private lock-protected working directory for the workers.
        testDir, testDirLock = _unusedTestDirectory(
            FilePath(self._workingDirectory))
        # Never spawn more workers than there are tests to run.
        workerNumber = min(count, self._workerNumber)
        ampWorkers = [LocalWorkerAMP() for x in range(workerNumber)]
        workers = self.createLocalWorkers(ampWorkers, testDir.path)
        processEndDeferreds = [worker.endDeferred for worker in workers]
        self.launchWorkerProcesses(reactor.spawnProcess, workers,
                                   self._workerArguments)
        def runTests():
            # A single shared iterator: each test is dispatched to exactly
            # one worker, whichever asks for it next.
            testCases = iter(list(_iterateTests(suite)))
            workerDeferreds = []
            for worker in ampWorkers:
                workerDeferreds.append(
                    self._driveWorker(worker, result, testCases,
                                      cooperate=cooperate))
            return DeferredList(workerDeferreds, consumeErrors=True,
                                fireOnOneErrback=True)
        stopping = []
        def nextRun(ign):
            # Publish results for the run that just completed; in
            # --until-failure mode, keep re-running while still successful.
            self.writeResults(result)
            if not untilFailure:
                return
            if not result.wasSuccessful():
                return
            d = runTests()
            return d.addCallback(nextRun)
        def stop(ign):
            # Release the working-directory lock and stop the reactor (the
            # `stopping` sentinel guards against calling reactor.stop twice).
            testDirLock.unlock()
            if not stopping:
                stopping.append(None)
                reactor.stop()
        def beforeShutDown():
            # On external shutdown, wait for all worker processes to end and
            # flush results before the reactor goes away.
            if not stopping:
                stopping.append(None)
                d = DeferredList(processEndDeferreds, consumeErrors=True)
                return d.addCallback(continueShutdown)
        def continueShutdown(ign):
            self.writeResults(result)
            return ign
        d = runTests()
        d.addCallback(nextRun)
        d.addBoth(stop)
        reactor.addSystemEventTrigger('before', 'shutdown', beforeShutDown)
        reactor.run()
        return result
def runUntilFailure(self, suite):
"""
Run the tests with local worker processes until they fail.
@param suite: A tests suite to be run.
"""
return self.run(suite, untilFailure=True)
| gpl-2.0 |
ngsutils/ngsutils | ngsutils/fastq/t/test_split.py | 1 | 3450 | #!/usr/bin/env python
'''
Tests for fastqutils split
'''
import os
import unittest
import ngsutils.fastq.split
from ngsutils.fastq import FASTQ
class SplitTest(unittest.TestCase):
    """Tests for ngsutils.fastq.split.fastq_split.
    Each test splits the bundled test.fastq fixture (which, per the expected
    names below, contains paired reads foo/1, foo/2, bar/1, bar/2, baz/1,
    baz/2) into N chunk files next to this module and cleans them up after.
    """
    def testSplit(self):
        """Paired split into 2 chunks: mates stay together in one chunk."""
        fname = os.path.join(os.path.dirname(__file__), 'test.fastq')
        templ = os.path.join(os.path.dirname(__file__), 'test_templ')
        ngsutils.fastq.split.fastq_split(fname, templ, 2, quiet=True)
        self.assertTrue(os.path.exists('%s.1.fastq' % templ))
        self.assertTrue(os.path.exists('%s.2.fastq' % templ))
        fq1 = FASTQ('%s.1.fastq' % templ)
        fq2 = FASTQ('%s.2.fastq' % templ)
        # Both mates of a pair must land in the same output file.
        names1 = [x.fullname for x in fq1.fetch(quiet=True)]
        self.assertEqual(names1, ['foo /1', 'foo /2', 'baz /1', 'baz /2'])
        names2 = [x.fullname for x in fq2.fetch(quiet=True)]
        self.assertEqual(names2, ['bar /1', 'bar /2'])
        fq1.close()
        fq2.close()
        os.unlink('%s.1.fastq' % templ)
        os.unlink('%s.2.fastq' % templ)
    def testSplitGz(self):
        """gz=True must produce gzip-compressed chunk files (.fastq.gz)."""
        fname = os.path.join(os.path.dirname(__file__), 'test.fastq')
        templ = os.path.join(os.path.dirname(__file__), 'test_templ')
        ngsutils.fastq.split.fastq_split(fname, templ, 2, gz=True, quiet=True)
        self.assertTrue(os.path.exists('%s.1.fastq.gz' % templ))
        self.assertTrue(os.path.exists('%s.2.fastq.gz' % templ))
        os.unlink('%s.1.fastq.gz' % templ)
        os.unlink('%s.2.fastq.gz' % templ)
    def testSplitUnpaired(self):
        """ignore_pairs=True distributes reads round-robin, splitting mates."""
        fname = os.path.join(os.path.dirname(__file__), 'test.fastq')
        templ = os.path.join(os.path.dirname(__file__), 'test_templ')
        ngsutils.fastq.split.fastq_split(fname, templ, 2, ignore_pairs=True, quiet=True)
        self.assertTrue(os.path.exists('%s.1.fastq' % templ))
        self.assertTrue(os.path.exists('%s.2.fastq' % templ))
        fq1 = FASTQ('%s.1.fastq' % templ)
        fq2 = FASTQ('%s.2.fastq' % templ)
        names1 = [x.name for x in fq1.fetch(quiet=True)]
        self.assertEqual(names1, ['foo', 'bar', 'baz'])
        names2 = [x.name for x in fq2.fetch(quiet=True)]
        self.assertEqual(names2, ['foo', 'bar', 'baz'])
        fq1.close()
        fq2.close()
        os.unlink('%s.1.fastq' % templ)
        os.unlink('%s.2.fastq' % templ)
    def testSplitThree(self):
        """Unpaired split into 3 chunks: round-robin across three files."""
        fname = os.path.join(os.path.dirname(__file__), 'test.fastq')
        templ = os.path.join(os.path.dirname(__file__), 'test_templ')
        ngsutils.fastq.split.fastq_split(fname, templ, 3, ignore_pairs=True, quiet=True)
        self.assertTrue(os.path.exists('%s.1.fastq' % templ))
        self.assertTrue(os.path.exists('%s.2.fastq' % templ))
        self.assertTrue(os.path.exists('%s.3.fastq' % templ))
        fq1 = FASTQ('%s.1.fastq' % templ)
        fq2 = FASTQ('%s.2.fastq' % templ)
        fq3 = FASTQ('%s.3.fastq' % templ)
        names1 = [x.fullname for x in fq1.fetch(quiet=True)]
        self.assertEqual(names1, ['foo /1', 'bar /2'])
        names2 = [x.fullname for x in fq2.fetch(quiet=True)]
        self.assertEqual(names2, ['foo /2', 'baz /1'])
        names3 = [x.fullname for x in fq3.fetch(quiet=True)]
        self.assertEqual(names3, ['bar /1', 'baz /2'])
        fq1.close()
        fq2.close()
        fq3.close()
        os.unlink('%s.1.fastq' % templ)
        os.unlink('%s.2.fastq' % templ)
        os.unlink('%s.3.fastq' % templ)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| bsd-3-clause |
AndroidOpenDevelopment/android_external_chromium_org | chrome/browser/test_presubmit.py | 11 | 20952 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for Web Development Style Guide checker."""
import os
import re
import sys
import unittest
test_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.extend([
os.path.normpath(os.path.join(test_dir, '..', '..', 'tools')),
os.path.join(test_dir),
])
import find_depot_tools # pylint: disable=W0611
from testing_support.super_mox import SuperMoxTestBase
from web_dev_style import css_checker, js_checker # pylint: disable=F0401
class JsStyleGuideTest(SuperMoxTestBase):
  """Unit tests for the JavaScript style checks in web_dev_style.js_checker.
  Each ShouldFail*/ShouldPass* helper asserts that one checker does (or does
  not) flag a single source line; the test* methods feed in representative
  bad and good lines for each rule.
  """
  def setUp(self):
    # Wire a JSChecker up to mock presubmit input/output APIs; only the
    # regular-expression module needs to be real.
    SuperMoxTestBase.setUp(self)
    input_api = self.mox.CreateMockAnything()
    input_api.re = re
    output_api = self.mox.CreateMockAnything()
    self.checker = js_checker.JSChecker(input_api, output_api)
  def GetHighlight(self, line, error):
    """Returns the substring of |line| that is highlighted in |error|."""
    # The checker renders errors as the offending line followed by a line of
    # '^' characters marking the flagged span.
    error_lines = error.split('\n')
    highlight = error_lines[error_lines.index(line) + 1]
    return ''.join(ch1 for (ch1, ch2) in zip(line, highlight) if ch2 == '^')
  def ShouldFailConstCheck(self, line):
    """Checks that the 'const' checker flags |line| as a style error."""
    error = self.checker.ConstCheck(1, line)
    self.assertNotEqual('', error,
        'Should be flagged as style error: ' + line)
    self.assertEqual(self.GetHighlight(line, error), 'const')
  def ShouldPassConstCheck(self, line):
    """Checks that the 'const' checker doesn't flag |line| as a style error."""
    self.assertEqual('', self.checker.ConstCheck(1, line),
        'Should not be flagged as style error: ' + line)
  def testConstFails(self):
    lines = [
        "const foo = 'bar';",
        "  const bar = 'foo';",
        # Trying to use |const| as a variable name
        "var const = 0;",
        "var x = 5; const y = 6;",
        "for (var i=0, const e=10; i<e; i++) {",
        "for (const x=0; x<foo; i++) {",
        "while (const x = 7) {",
    ]
    for line in lines:
      self.ShouldFailConstCheck(line)
  def testConstPasses(self):
    lines = [
        # sanity check
        "var foo = 'bar'",
        # @const JsDoc tag
        "/** @const */ var SEVEN = 7;",
        # @const tag in multi-line comment
        " * @const",
        " * @const",
        # @constructor tag in multi-line comment
        " * @constructor",
        " * @constructor",
        # words containing 'const'
        "if (foo.constructor) {",
        "var deconstruction = 'something';",
        "var madeUpWordconst = 10;",
        # Strings containing the word |const|
        "var str = 'const at the beginning';",
        "var str = 'At the end: const';",
        # doing this one with regex is probably not practical
        #"var str = 'a const in the middle';",
    ]
    for line in lines:
      self.ShouldPassConstCheck(line)
  def ShouldFailChromeSendCheck(self, line):
    """Checks that the 'chrome.send' checker flags |line| as a style error."""
    error = self.checker.ChromeSendCheck(1, line)
    self.assertNotEqual('', error,
        'Should be flagged as style error: ' + line)
    self.assertEqual(self.GetHighlight(line, error), ', []')
  def ShouldPassChromeSendCheck(self, line):
    """Checks that the 'chrome.send' checker doesn't flag |line| as a style
    error.
    """
    self.assertEqual('', self.checker.ChromeSendCheck(1, line),
        'Should not be flagged as style error: ' + line)
  def testChromeSendFails(self):
    lines = [
        "chrome.send('message', []);",
        "  chrome.send('message', []);",
    ]
    for line in lines:
      self.ShouldFailChromeSendCheck(line)
  def testChromeSendPasses(self):
    lines = [
        "chrome.send('message', constructArgs('foo', []));",
        "  chrome.send('message', constructArgs('foo', []));",
        "chrome.send('message', constructArgs([]));",
        "  chrome.send('message', constructArgs([]));",
    ]
    for line in lines:
      self.ShouldPassChromeSendCheck(line)
  def ShouldFailEndJsDocCommentCheck(self, line):
    """Checks that the **/ checker flags |line| as a style error."""
    error = self.checker.EndJsDocCommentCheck(1, line)
    self.assertNotEqual('', error,
        'Should be flagged as style error: ' + line)
    self.assertEqual(self.GetHighlight(line, error), '**/')
  def ShouldPassEndJsDocCommentCheck(self, line):
    """Checks that the **/ checker doesn't flag |line| as a style error."""
    self.assertEqual('', self.checker.EndJsDocCommentCheck(1, line),
        'Should not be flagged as style error: ' + line)
  def testEndJsDocCommentFails(self):
    lines = [
        "/** @override **/",
        "/** @type {number} @const **/",
        "  **/",
        "**/  ",
    ]
    for line in lines:
      self.ShouldFailEndJsDocCommentCheck(line)
  def testEndJsDocCommentPasses(self):
    lines = [
        "/***************/",  # visual separators
        "  */",  # valid JSDoc comment ends
        "*/  ",
        "/**/",  # funky multi-line comment enders
        "/** @override */",  # legit JSDoc one-liners
    ]
    for line in lines:
      self.ShouldPassEndJsDocCommentCheck(line)
  def ShouldFailGetElementByIdCheck(self, line):
    """Checks that the 'getElementById' checker flags |line| as a style
    error.
    """
    error = self.checker.GetElementByIdCheck(1, line)
    self.assertNotEqual('', error,
        'Should be flagged as style error: ' + line)
    self.assertEqual(self.GetHighlight(line, error), 'document.getElementById')
  def ShouldPassGetElementByIdCheck(self, line):
    """Checks that the 'getElementById' checker doesn't flag |line| as a style
    error.
    """
    self.assertEqual('', self.checker.GetElementByIdCheck(1, line),
        'Should not be flagged as style error: ' + line)
  def testGetElementByIdFails(self):
    lines = [
        "document.getElementById('foo');",
        "  document.getElementById('foo');",
        "var x = document.getElementById('foo');",
        "if (document.getElementById('foo').hidden) {",
    ]
    for line in lines:
      self.ShouldFailGetElementByIdCheck(line)
  def testGetElementByIdPasses(self):
    lines = [
        "elem.ownerDocument.getElementById('foo');",
        "  elem.ownerDocument.getElementById('foo');",
        "var x = elem.ownerDocument.getElementById('foo');",
        "if (elem.ownerDocument.getElementById('foo').hidden) {",
        "doc.getElementById('foo');",
        "  doc.getElementById('foo');",
        "cr.doc.getElementById('foo');",
        "  cr.doc.getElementById('foo');",
        "var x = doc.getElementById('foo');",
        "if (doc.getElementById('foo').hidden) {",
    ]
    for line in lines:
      self.ShouldPassGetElementByIdCheck(line)
  def ShouldFailInheritDocCheck(self, line):
    """Checks that the '@inheritDoc' checker flags |line| as a style error."""
    error = self.checker.InheritDocCheck(1, line)
    self.assertNotEqual('', error,
        msg='Should be flagged as style error: ' + line)
    self.assertEqual(self.GetHighlight(line, error), '@inheritDoc')
  def ShouldPassInheritDocCheck(self, line):
    """Checks that the '@inheritDoc' checker doesn't flag |line| as a style
    error.
    """
    self.assertEqual('', self.checker.InheritDocCheck(1, line),
        msg='Should not be flagged as style error: ' + line)
  def testInheritDocFails(self):
    lines = [
        " /** @inheritDoc */",
        " * @inheritDoc",
    ]
    for line in lines:
      self.ShouldFailInheritDocCheck(line)
  def testInheritDocPasses(self):
    lines = [
        "And then I said, but I won't @inheritDoc! Hahaha!",
        " If your dad's a doctor, do you inheritDoc?",
        " What's up, inherit doc?",
        " this.inheritDoc(someDoc)",
    ]
    for line in lines:
      self.ShouldPassInheritDocCheck(line)
  def ShouldFailWrapperTypeCheck(self, line):
    """Checks that the use of wrapper types (i.e. new Number(), @type {Number})
    is a style error.
    """
    error = self.checker.WrapperTypeCheck(1, line)
    self.assertNotEqual('', error,
        msg='Should be flagged as style error: ' + line)
    highlight = self.GetHighlight(line, error)
    self.assertTrue(highlight in ('Boolean', 'Number', 'String'))
  def ShouldPassWrapperTypeCheck(self, line):
    """Checks that the wrapper type checker doesn't flag |line| as a style
    error.
    """
    self.assertEqual('', self.checker.WrapperTypeCheck(1, line),
        msg='Should not be flagged as style error: ' + line)
  def testWrapperTypePasses(self):
    lines = [
        "/** @param {!ComplexType} */",
        "  * @type {Object}",
        "  * @param {Function=} opt_callback",
        "  * @param {} num Number of things to add to {blah}.",
        "  * @return {!print_preview.PageNumberSet}",
        " /* @returns {Number} */",  # Should be /** @return {Number} */
        # NOTE(review): the next entry has no trailing comma, so it is
        # implicitly concatenated with the following string — verify whether
        # that is intentional or a missing comma.
        "* @param {!LocalStrings}"
        " Your type of Boolean is false!",
        "  Then I parameterized her Number from her friend!",
        "  A String of Pearls",
        "  types.params.aBoolean.typeString(someNumber)",
    ]
    for line in lines:
      self.ShouldPassWrapperTypeCheck(line)
  def testWrapperTypeFails(self):
    lines = [
        "  /**@type {String}*/(string)",
        "  * @param{Number=} opt_blah A number",
        "/** @private @return {!Boolean} */",
        " * @param {number|String}",
    ]
    for line in lines:
      self.ShouldFailWrapperTypeCheck(line)
  def ShouldFailVarNameCheck(self, line):
    """Checks that var unix_hacker, $dollar are style errors."""
    error = self.checker.VarNameCheck(1, line)
    self.assertNotEqual('', error,
        msg='Should be flagged as style error: ' + line)
    highlight = self.GetHighlight(line, error)
    self.assertFalse('var ' in highlight);
  def ShouldPassVarNameCheck(self, line):
    """Checks that variableNamesLikeThis aren't style errors."""
    self.assertEqual('', self.checker.VarNameCheck(1, line),
        msg='Should not be flagged as style error: ' + line)
  def testVarNameFails(self):
    lines = [
        "var private_;",
        " var _super_private",
        "  var unix_hacker = someFunc();",
    ]
    for line in lines:
      self.ShouldFailVarNameCheck(line)
  def testVarNamePasses(self):
    lines = [
        "  var namesLikeThis = [];",
        " for (var i = 0; i < 10; ++i) { ",
        "for (var i in obj) {",
        " var one, two, three;",
        "  var magnumPI = {};",
        " var g_browser = 'da browzer';",
        "/** @const */ var Bla = options.Bla;",  # goog.scope() replacement.
        " var $ = function() {",                 # For legacy reasons.
        " var StudlyCaps = cr.define('bla')",    # Classes.
        " var SCARE_SMALL_CHILDREN = [",         # TODO(dbeam): add @const in
                                                 # front of all these vars like
        "/** @const */ CONST_VAR = 1;",          # this line has (<--).
    ]
    for line in lines:
      self.ShouldPassVarNameCheck(line)
class CssStyleGuideTest(SuperMoxTestBase):
  """Unit tests for the CSS style checks in web_dev_style.css_checker.
  Each test hands VerifyContentsProducesOutput() a CSS snippet and the exact
  warning text the checker is expected to emit for it.
  """
  def setUp(self):
    SuperMoxTestBase.setUp(self)
    self.fake_file_name = 'fake.css'
    self.fake_file = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(self.fake_file, 'LocalPath')
    self.fake_file.LocalPath().AndReturn(self.fake_file_name)
    # Actual calls to NewContents() are defined in each test.
    self.mox.StubOutWithMock(self.fake_file, 'NewContents')
    self.input_api = self.mox.CreateMockAnything()
    self.input_api.re = re
    # NOTE(review): 'AffectedSourceFiles' is stubbed here but the expectation
    # below is recorded on AffectedFiles() — confirm which one CSSChecker
    # actually calls.
    self.mox.StubOutWithMock(self.input_api, 'AffectedSourceFiles')
    self.input_api.AffectedFiles(
        include_deletes=False, file_filter=None).AndReturn([self.fake_file])
    # Actual creations of PresubmitPromptWarning are defined in each test.
    self.output_api = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(self.output_api, 'PresubmitPromptWarning',
                             use_mock_anything=True)
    author_msg = ('Was the CSS checker useful? '
                  'Send feedback or hate mail to dbeam@chromium.org.')
    # NOTE(review): self.output_api is re-created here, discarding the mock
    # that had PresubmitPromptWarning stubbed above — verify this is intended.
    self.output_api = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(self.output_api, 'PresubmitNotifyResult',
                             use_mock_anything=True)
    self.output_api.PresubmitNotifyResult(author_msg).AndReturn(None)
  def VerifyContentsProducesOutput(self, contents, output):
    """Runs the CSS checker on |contents| and expects exactly |output| to be
    reported as a presubmit warning for the fake file.
    """
    self.fake_file.NewContents().AndReturn(contents.splitlines())
    self.output_api.PresubmitPromptWarning(
        self.fake_file_name + ':\n' + output.strip()).AndReturn(None)
    self.mox.ReplayAll()
    css_checker.CSSChecker(self.input_api, self.output_api).RunChecks()
  def testCssAlphaWithAtBlock(self):
    self.VerifyContentsProducesOutput("""
<include src="../shared/css/cr/ui/overlay.css">
<include src="chrome://resources/totally-cool.css" />
/* A hopefully safely ignored comment and @media statement. /**/
@media print {
  div {
    display: block;
    color: red;
  }
}
.rule {
  z-index: 5;
<if expr="not is macosx">
  background-image: url(chrome://resources/BLAH);  /* TODO(dbeam): Fix this. */
  background-color: rgb(235, 239, 249);
</if>
<if expr="is_macosx">
  background-color: white;
  background-image: url(chrome://resources/BLAH2);
</if>
  color: black;
}
<if expr="is_macosx">
.language-options-right {
  visibility: hidden;
  opacity: 1; /* TODO(dbeam): Fix this. */
}
</if>""", """
- Alphabetize properties and list vendor specific (i.e. -webkit) above standard.
    display: block;
    color: red;
    z-index: 5;
    color: black;""")
  def testCssAlphaWithNonStandard(self):
    self.VerifyContentsProducesOutput("""
div {
  /* A hopefully safely ignored comment and @media statement. /**/
  color: red;
  -webkit-margin-start: 5px;
}""", """
- Alphabetize properties and list vendor specific (i.e. -webkit) above standard.
    color: red;
    -webkit-margin-start: 5px;""")
  def testCssAlphaWithLongerDashedProps(self):
    self.VerifyContentsProducesOutput("""
div {
  border-left: 5px;  /* A hopefully removed comment. */
  border: 5px solid red;
}""", """
- Alphabetize properties and list vendor specific (i.e. -webkit) above standard.
    border-left: 5px;
    border: 5px solid red;""")
  def testCssBracesHaveSpaceBeforeAndNothingAfter(self):
    self.VerifyContentsProducesOutput("""
/* Hello! */div/* Comment here*/{
  display: block;
}
blah /* hey! */
{
  rule: value;
}
.this.is { /* allowed */
  rule: value;
}""", """
- Start braces ({) end a selector, have a space before them and no rules after.
    div{
    {""")
  def testCssClassesUseDashes(self):
    self.VerifyContentsProducesOutput("""
.className,
.ClassName,
.class-name /* We should not catch this. */,
.class_name {
  display: block;
}""", """
 - Classes use .dash-form.
    .className,
    .ClassName,
    .class_name {""")
  def testCssCloseBraceOnNewLine(self):
    self.VerifyContentsProducesOutput("""
@media { /* TODO(dbeam) Fix this case. */
  .rule {
    display: block;
  }}
@-webkit-keyframe blah {
  100% { height: -500px 0; }
}
#rule {
  rule: value; }""", """
- Always put a rule closing brace (}) on a new line.
    rule: value; }""")
  def testCssColonsHaveSpaceAfter(self):
    self.VerifyContentsProducesOutput("""
div:not(.class):not([attr=5]), /* We should not catch this. */
div:not(.class):not([attr]) /* Nor this. */ {
  background: url(data:image/jpeg,asdfasdfsadf); /* Ignore this. */
  background: -webkit-linear-gradient(left, red,
                                      80% blah blee blar);
  color: red;
  display:block;
}""", """
- Colons (:) should have a space after them.
    display:block;
- Don't use data URIs in source files. Use grit instead.
    background: url(data:image/jpeg,asdfasdfsadf);""")
  def testCssFavorSingleQuotes(self):
    self.VerifyContentsProducesOutput("""
html[dir="rtl"] body,
html[dir=ltr] body /* TODO(dbeam): Require '' around rtl in future? */ {
  background: url("chrome://resources/BLAH");
  font-family: "Open Sans";
<if expr="is_macosx">
  blah: blee;
</if>
}""", """
- Use single quotes (') instead of double quotes (") in strings.
    html[dir="rtl"] body,
    background: url("chrome://resources/BLAH");
    font-family: "Open Sans";""")
  def testCssHexCouldBeShorter(self):
    self.VerifyContentsProducesOutput("""
#abc,
#abc-,
#abc-ghij,
#abcdef-,
#abcdef-ghij,
#aaaaaa,
#bbaacc {
  background-color: #336699; /* Ignore short hex rule if not gray. */
  color: #999999;
  color: #666;
}""", """
- Use abbreviated hex (#rgb) when in form #rrggbb.
    color: #999999; (replace with #999)
- Use rgb() over #hex when not a shade of gray (like #333).
    background-color: #336699; (replace with rgb(51, 102, 153))""")
  def testCssUseMillisecondsForSmallTimes(self):
    self.VerifyContentsProducesOutput("""
.transition-0s /* This is gross but may happen. */ {
  transform: one 0.2s;
  transform: two .1s;
  transform: tree 1s;
  transform: four 300ms;
}""", """
- Use milliseconds for time measurements under 1 second.
    transform: one 0.2s; (replace with 200ms)
    transform: two .1s; (replace with 100ms)""")
  def testCssNoDataUrisInSourceFiles(self):
    self.VerifyContentsProducesOutput("""
img {
  background: url( data:image/jpeg,4\/\/350|\/|3|2 );
  background: url('data:image/jpeg,4\/\/350|\/|3|2');
}""", """
- Don't use data URIs in source files. Use grit instead.
    background: url( data:image/jpeg,4\/\/350|\/|3|2 );
    background: url('data:image/jpeg,4\/\/350|\/|3|2');""")
  def testCssOneRulePerLine(self):
    self.VerifyContentsProducesOutput("""
a:not([hidden]):not(.custom-appearance):not([version=1]):first-of-type,
a:not([hidden]):not(.custom-appearance):not([version=1]):first-of-type ~
    input[type='checkbox']:not([hidden]),
div {
  background: url(chrome://resources/BLAH);
  rule: value; /* rule: value; */
  rule: value; rule: value;
}""", """
- One rule per line (what not to do: color: red; margin: 0;).
    rule: value; rule: value;""")
  def testCssOneSelectorPerLine(self):
    self.VerifyContentsProducesOutput("""
a,
div,a,
div,/* Hello! */ span,
#id.class([dir=rtl):not(.class):any(a, b, d) {
  rule: value;
}
a,
div,a {
  some-other: rule here;
}""", """
- One selector per line (what not to do: a, b {}).
    div,a,
    div, span,
    div,a {""")
  def testCssPseudoElementDoubleColon(self):
    self.VerifyContentsProducesOutput("""
a:href,
br::after,
::-webkit-scrollbar-thumb,
a:not([empty]):hover:focus:active, /* shouldn't catch here and above */
abbr:after,
.tree-label:empty:after,
b:before,
:-WebKit-ScrollBar {
  rule: value;
}""", """
- Pseudo-elements should use double colon (i.e. ::after).
    :after (should be ::after)
    :after (should be ::after)
    :before (should be ::before)
    :-WebKit-ScrollBar (should be ::-WebKit-ScrollBar)
""")
  def testCssRgbIfNotGray(self):
    self.VerifyContentsProducesOutput("""
#abc,
#aaa,
#aabbcc {
  background: -webkit-linear-gradient(left, from(#abc), to(#def));
  color: #bad;
  color: #bada55;
}""", """
- Use rgb() over #hex when not a shade of gray (like #333).
    background: -webkit-linear-gradient(left, from(#abc), to(#def)); """
"""(replace with rgb(170, 187, 204), rgb(221, 238, 255))
    color: #bad; (replace with rgb(187, 170, 221))
    color: #bada55; (replace with rgb(186, 218, 85))""")
  def testCssZeroLengthTerms(self):
    self.VerifyContentsProducesOutput("""
@-webkit-keyframe anim {
  0% { /* Ignore key frames */
    width: 0px;
  }
  10% {
    width: 10px;
  }
  100% {
    width: 100px;
  }
}
/* http://crbug.com/359682 */
#spinner-container #spinner {
  -webkit-animation-duration: 1.0s;
}
.media-button.play > .state0.active,
.media-button[state='0'] > .state0.normal /* blah */, /* blee */
.media-button[state='0']:not(.disabled):hover > .state0.hover {
  -webkit-animation: anim 0s;
  -webkit-animation-duration: anim 0ms;
  -webkit-transform: scale(0%),
                     translateX(0deg),
                     translateY(0rad),
                     translateZ(0grad);
  background-position-x: 0em;
  background-position-y: 0ex;
  border-width: 0em;
  color: hsl(0, 0%, 85%); /* Shouldn't trigger error. */
  opacity: .0;
  opacity: 0.0;
  opacity: 0.;
}
@page {
  border-width: 0mm;
  height: 0cm;
  width: 0in;
}""", """
- Make all zero length terms (i.e. 0px) 0 unless inside of hsl() or part of"""
""" @keyframe.
    width: 0px;
    -webkit-animation: anim 0s;
    -webkit-animation-duration: anim 0ms;
    -webkit-transform: scale(0%),
    translateX(0deg),
    translateY(0rad),
    translateZ(0grad);
    background-position-x: 0em;
    background-position-y: 0ex;
    border-width: 0em;
    opacity: .0;
    opacity: 0.0;
    opacity: 0.;
    border-width: 0mm;
    height: 0cm;
    width: 0in;
""")
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
| bsd-3-clause |
nightjean/Deep-Learning | tensorflow/contrib/distributions/python/kernel_tests/mvn_full_covariance_test.py | 31 | 6072 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormalFullCovariance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
ds = distributions
rng = np.random.RandomState(42)
class MultivariateNormalFullCovarianceTest(test.TestCase):
  """Tests for ds.MultivariateNormalFullCovariance (validation, log-prob,
  shapes and KL divergence), checked against scipy and a numpy KL formula.
  """
  def _random_pd_matrix(self, *shape):
    # Build a random positive-definite matrix as chol @ chol^T, where chol is
    # a lower-triangular factor with softplus-positive diagonal.
    mat = rng.rand(*shape)
    chol = ds.matrix_diag_transform(mat, transform=nn_ops.softplus)
    chol = array_ops.matrix_band_part(chol, -1, 0)
    return math_ops.matmul(chol, chol, adjoint_b=True).eval()
  def testRaisesIfInitializedWithNonSymmetricMatrix(self):
    with self.test_session():
      mu = [1., 2.]
      sigma = [[1., 0.], [1., 1.]]  # Nonsingular, but not symmetric
      mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
      with self.assertRaisesOpError("not symmetric"):
        mvn.covariance().eval()
  def testDoesNotRaiseIfInitializedWithSymmetricMatrix(self):
    with self.test_session():
      mu = rng.rand(10)
      sigma = self._random_pd_matrix(10, 10)
      mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
      # Should not raise
      mvn.covariance().eval()
  def testLogPDFScalarBatch(self):
    """log_prob/prob of a scalar-batch MVN must match scipy."""
    with self.test_session():
      mu = rng.rand(2)
      sigma = self._random_pd_matrix(2, 2)
      mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
      x = rng.rand(2)
      log_pdf = mvn.log_prob(x)
      pdf = mvn.prob(x)
      scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)
      expected_log_pdf = scipy_mvn.logpdf(x)
      expected_pdf = scipy_mvn.pdf(x)
      self.assertEqual((), log_pdf.get_shape())
      self.assertEqual((), pdf.get_shape())
      self.assertAllClose(expected_log_pdf, log_pdf.eval())
      self.assertAllClose(expected_pdf, pdf.eval())
  def testLogPDFScalarBatchCovarianceNotProvided(self):
    """Omitting covariance_matrix must default to the identity matrix."""
    with self.test_session():
      mu = rng.rand(2)
      mvn = ds.MultivariateNormalFullCovariance(
          mu, covariance_matrix=None, validate_args=True)
      x = rng.rand(2)
      log_pdf = mvn.log_prob(x)
      pdf = mvn.prob(x)
      # Initialize a scipy_mvn with the default covariance.
      scipy_mvn = stats.multivariate_normal(mean=mu, cov=np.eye(2))
      expected_log_pdf = scipy_mvn.logpdf(x)
      expected_pdf = scipy_mvn.pdf(x)
      self.assertEqual((), log_pdf.get_shape())
      self.assertEqual((), pdf.get_shape())
      self.assertAllClose(expected_log_pdf, log_pdf.eval())
      self.assertAllClose(expected_pdf, pdf.eval())
  def testShapes(self):
    with self.test_session():
      mu = rng.rand(3, 5, 2)
      covariance = self._random_pd_matrix(3, 5, 2, 2)
      mvn = ds.MultivariateNormalFullCovariance(
          mu, covariance, validate_args=True)
      # Shapes known at graph construction time.
      self.assertEqual((2,), tuple(mvn.event_shape.as_list()))
      self.assertEqual((3, 5), tuple(mvn.batch_shape.as_list()))
      # Shapes known at runtime.
      self.assertEqual((2,), tuple(mvn.event_shape_tensor().eval()))
      self.assertEqual((3, 5), tuple(mvn.batch_shape_tensor().eval()))
  def _random_mu_and_sigma(self, batch_shape, event_shape):
    # This ensures sigma is positive def.
    mat_shape = batch_shape + event_shape + event_shape
    mat = rng.randn(*mat_shape)
    perm = np.arange(mat.ndim)
    perm[-2:] = [perm[-1], perm[-2]]
    sigma = np.matmul(mat, np.transpose(mat, perm))
    mu_shape = batch_shape + event_shape
    mu = rng.randn(*mu_shape)
    return mu, sigma
  def testKLBatch(self):
    """Batched KL must equal the per-element closed-form numpy KL."""
    batch_shape = (2,)
    event_shape = (3,)
    with self.test_session():
      mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
      mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)
      mvn_a = ds.MultivariateNormalFullCovariance(
          loc=mu_a,
          covariance_matrix=sigma_a,
          validate_args=True)
      mvn_b = ds.MultivariateNormalFullCovariance(
          loc=mu_b,
          covariance_matrix=sigma_b,
          validate_args=True)
      kl = ds.kl_divergence(mvn_a, mvn_b)
      self.assertEqual(batch_shape, kl.get_shape())
      kl_v = kl.eval()
      # NOTE(review): sigma_b[i, :] is indexed inconsistently with
      # sigma_a[i, :, :]; for these 3-D arrays both select the same 2-D
      # matrix, so the result is unchanged.
      expected_kl_0 = _compute_non_batch_kl(mu_a[0, :], sigma_a[0, :, :],
                                            mu_b[0, :], sigma_b[0, :])
      expected_kl_1 = _compute_non_batch_kl(mu_a[1, :], sigma_a[1, :, :],
                                            mu_b[1, :], sigma_b[1, :])
      self.assertAllClose(expected_kl_0, kl_v[0])
      self.assertAllClose(expected_kl_1, kl_v[1])
def _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b):
"""Non-batch KL for N(mu_a, sigma_a), N(mu_b, sigma_b)."""
# Check using numpy operations
# This mostly repeats the tensorflow code _kl_mvn_mvn(), but in numpy.
# So it is important to also check that KL(mvn, mvn) = 0.
sigma_b_inv = np.linalg.inv(sigma_b)
t = np.trace(sigma_b_inv.dot(sigma_a))
q = (mu_b - mu_a).dot(sigma_b_inv).dot(mu_b - mu_a)
k = mu_a.shape[0]
l = np.log(np.linalg.det(sigma_b) / np.linalg.det(sigma_a))
return 0.5 * (t + q - k + l)
if __name__ == "__main__":
test.main()
| apache-2.0 |
alexteodor/odoo | addons/project/res_partner.py | 334 | 1953 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class res_partner(osv.osv):
    # NOTE(review): the string literal placed after _task_count below was
    # probably intended to be the class docstring; where it sits now it is a
    # no-op expression statement.
    def _task_count(self, cr, uid, ids, field_name, arg, context=None):
        """Functional-field getter for 'task_count'.
        Returns a dict mapping each partner id in ``ids`` to the number of
        ``project.task`` records whose ``partner_id`` references it.
        """
        Task = self.pool['project.task']
        return {
            partner_id: Task.search_count(cr,uid, [('partner_id', '=', partner_id)], context=context)
            for partner_id in ids
        }
    """ Inherits partner and adds Tasks information in the partner form """
    _inherit = 'res.partner'
    _columns = {
        'task_ids': fields.one2many('project.task', 'partner_id', 'Tasks'),
        'task_count': fields.function(_task_count, string='# Tasks', type='integer'),
    }
    def copy(self, cr, uid, record_id, default=None, context=None):
        """Duplicate a partner without carrying over its linked tasks."""
        if default is None:
            default = {}
        # The duplicated partner must start with no task links.
        default['task_ids'] = []
        return super(res_partner, self).copy(
            cr, uid, record_id, default=default, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wannaphongcom/flappy | flappy/events/keyboardevent.py | 2 | 1615 |
from event import Event
class KeyboardEvent(Event):
    """Event dispatched in response to keyboard input.

    Carries the key identity (charCode/keyCode/keyLocation) and the modifier
    state (shift/alt/control/command) at the time of the event.
    """

    # Event type constants.
    KEY_DOWN = "keyDown"
    KEY_UP = "keyUp"

    def __init__(self, etype, bubbles=False, cancelable=False,
                 charCodeValue=0, keyCodeValue=0, keyLocationValue=0,
                 ctrlKeyValue=False, altKeyValue=False,
                 shiftKeyValue=False, controlKeyValue=False,
                 commandKeyValue=False):
        Event.__init__(self, etype, bubbles, cancelable)
        # Identity and physical location of the pressed key.
        self.keyCode = keyCodeValue
        self.keyLocation = keyLocationValue
        self.charCode = charCodeValue
        # Modifier state at the time of the event.
        self.shiftKey = shiftKeyValue
        self.altKey = altKeyValue
        self.controlKey = controlKeyValue
        self.commandKey = commandKeyValue
        # Convenience flag: considered "ctrl" if either the control or the
        # command (Mac) modifier was active as well.
        self.ctrlKey = ctrlKeyValue or self.controlKey or self.commandKey

    def clone(self):
        """Return a new KeyboardEvent with the same type and key state."""
        # NOTE: self.type/self.bubbles/self.cancelable are assumed to be set
        # by the Event base class constructor.
        return KeyboardEvent(self.type, self.bubbles, self.cancelable,
                             self.charCode, self.keyCode, self.keyLocation,
                             self.ctrlKey, self.altKey, self.shiftKey,
                             self.controlKey, self.commandKey)

    def __str__(self):
        # Fixed: the first segment previously lacked a trailing space,
        # producing output like "...cancelable=FalsecharCode=0...".
        s = '[KeyboardEvent type=%s bubbles=%s cancelable=%s ' % \
            (self.type, str(self.bubbles), str(self.cancelable))
        s += 'charCode=%s keyCode=%s keyLocation=%s ' % \
            (str(self.charCode), str(self.keyCode), str(self.keyLocation))
        s += 'ctrlKey=%s altKey=%s shiftKey=%s]' % \
            (str(self.ctrlKey), str(self.altKey), str(self.shiftKey))
        return s
| mit |
hagabbar/pycbc_copy | pycbc/io/inference_hdf.py | 1 | 27471 | # Copyright (C) 2016 Christopher M. Biwer
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# self.option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""This modules defines functions for reading and writing samples that the
inference samplers generate.
"""
import os
import sys
import h5py
import numpy
import logging
from pycbc import DYN_RANGE_FAC
from pycbc.types import FrequencySeries
from pycbc.waveform import parameters as wfparams
import pycbc.inference.sampler
import pycbc.inference.likelihood
from pycbc.io import FieldArray
class _PosteriorOnlyParser(object):
"""Provides interface for reading/writing samples from/to an InferenceFile
that contains flattened posterior samples.
"""
@staticmethod
def _read_fields(fp, fields_group, fields, array_class,
thin_start=None, thin_interval=None, thin_end=None,
iteration=None):
"""Reads fields from the given file.
"""
if iteration is not None:
get_index = iteration
else:
get_index = fp.get_slice(thin_start=thin_start, thin_end=thin_end,
thin_interval=thin_interval)
# load
arrays = {}
group = fields_group + '/{}'
arrays = {field: fp[group.format(field)][get_index]
for field in fields}
return array_class.from_kwargs(**arrays)
@classmethod
def read_samples(cls, fp, parameters, samples_group=None,
thin_start=0, thin_end=None, thin_interval=1,
iteration=None, array_class=None):
"""Reads posterior samples from a posterior-only file.
"""
# get the group to load from
if samples_group is None:
samples_group = fp.samples_group
# get the type of array class to use
if array_class is None:
array_class = FieldArray
# get the names of fields needed for the given parameters
possible_fields = fp[samples_group].keys()
loadfields = array_class.parse_parameters(parameters, possible_fields)
return cls._read_fields(fp, samples_group, loadfields, array_class,
thin_start=thin_start,
thin_interval=thin_interval, thin_end=thin_end,
iteration=iteration)
@staticmethod
def write_samples_group(fp, samples_group, fields, samples):
"""Writes the given samples to the given samples group.
"""
for field in samples.fieldnames:
grp = '{}/{}'.format(samples_group, field)
fp[grp] = samples[field]
@classmethod
def n_independent_samples(cls, fp):
"""Returns the number of independent samples stored in the file.
"""
return cls.read_samples(fp, fp.variable_args[0]).size
class InferenceFile(h5py.File):
    """ A subclass of the h5py.File object that has extra functions for
    handling reading and writing the samples from the samplers.
    Parameters
    -----------
    path : str
        The path to the HDF file.
    mode : {None, str}
        The mode to open the file, eg. "w" for write and "r" for read.
    """
    # NOTE(review): ``name`` shadows h5py.File's own ``name`` property (which
    # normally returns the file's path) — confirm this is intentional.
    name = "hdf"
    # canonical HDF group names used throughout this file
    samples_group = 'samples'
    stats_group = 'likelihood_stats'
    sampler_group = 'sampler_states'
    def __init__(self, path, mode=None, **kwargs):
        super(InferenceFile, self).__init__(path, mode, **kwargs)
    @property
    def posterior_only(self):
        """Whether the file only contains flattened posterior samples.
        """
        try:
            return self.attrs['posterior_only']
        except KeyError:
            # older files never set this attribute; treat them as full files
            return False
    @property
    def sampler_name(self):
        """Returns the name of the sampler that was used."""
        return self.attrs["sampler"]
    @property
    def sampler_class(self):
        """Returns the sampler class that was used."""
        try:
            sampler = self.sampler_name
        except KeyError:
            return None
        return pycbc.inference.sampler.samplers[sampler]
    @property
    def samples_parser(self):
        """Returns the class to use to read/write samples from/to the file."""
        if self.posterior_only:
            return _PosteriorOnlyParser
        else:
            return self.sampler_class
    @property
    def likelihood_eval_name(self):
        """Returns the name of the likelihood evaluator that was used."""
        return self.attrs["likelihood_evaluator"]
    @property
    def variable_args(self):
        """Returns list of variable_args.
        Returns
        -------
        variable_args : {list, str}
            List of str that contain variable_args keys.
        """
        return self.attrs["variable_args"]
    @property
    def static_args(self):
        """Returns a dictionary of the static_args. The keys are the argument
        names, values are the value they were set to.
        """
        # each entry of the "static_args" attr names another attr holding
        # the value that argument was fixed to
        return dict([[arg, self.attrs[arg]]
                     for arg in self.attrs["static_args"]])
    @property
    def sampling_args(self):
        """Returns the parameters that were used to sample.
        Returns
        -------
        sampling_args : {list, str}
            List of the sampling args.
        """
        return self.attrs["sampling_args"]
    @property
    def lognl(self):
        """Returns the log noise likelihood."""
        return self.attrs["lognl"]
    @property
    def niterations(self):
        """Returns number of iterations performed.
        Returns
        -------
        niterations : int
            Number of iterations performed.
        """
        return self.attrs["niterations"]
    @property
    def n_independent_samples(self):
        """Returns the number of independent samples stored in the file.
        """
        return self.samples_parser.n_independent_samples(self)
    @property
    def burn_in_iterations(self):
        """Returns number of iterations in the burn in.
        """
        return self.attrs["burn_in_iterations"]
    @property
    def is_burned_in(self):
        """Returns whether or not the sampler is burned in.
        """
        return self.attrs["is_burned_in"]
    @property
    def nwalkers(self):
        """Returns number of walkers used.
        Returns
        -------
        nwalkesr : int
            Number of walkers used.
        """
        return self.attrs["nwalkers"]
    @property
    def ntemps(self):
        """Returns number of temperatures used."""
        return self.attrs["ntemps"]
    @property
    def acl(self):
        """ Returns the saved autocorelation length (ACL).
        Returns
        -------
        acl : {int, float}
            The ACL.
        """
        return self.attrs["acl"]
    @property
    def cmd(self):
        """ Returns the saved command line.
        Returns
        -------
        cmd : {str}
            The command line that created this InferenceFile.
        """
        return self.attrs["cmd"]
    @property
    def log_evidence(self):
        """Returns the log of the evidence and its error, if they exist in the
        file. Raises a KeyError otherwise.
        """
        return self.attrs["log_evidence"], self.attrs["dlog_evidence"]
    def read_samples(self, parameters, samples_group=None, **kwargs):
        """Reads samples from the file.
        Parameters
        -----------
        parameters : (list of) strings
            The parameter(s) to retrieve. A parameter can be the name of any
            field in `samples_group`, a virtual field or method of
            `FieldArray` (as long as the file contains the necessary fields
            to derive the virtual field or method), and/or a function of
            these.
        samples_group : str
            Group in HDF InferenceFile that parameters belong to.
        \**kwargs :
            The rest of the keyword args are passed to the sampler's
            `read_samples` method.
        Returns
        -------
        FieldArray
            Samples for the given parameters, as an instance of a
            FieldArray.
        """
        # get the appropriate sampler class
        samples_group = samples_group if samples_group else self.samples_group
        return self.samples_parser.read_samples(self, parameters,
                                                samples_group=samples_group,
                                                **kwargs)
    def read_likelihood_stats(self, **kwargs):
        """Reads likelihood stats from self.
        Parameters
        -----------
        \**kwargs :
            The keyword args are passed to the sampler's `read_likelihood_stats`
            method.
        Returns
        -------
        stats : {FieldArray, None}
            Likelihood stats in the file, as a FieldArray. The fields of the
            array are the names of the stats that are in the `likelihood_stats`
            group.
        """
        # the stats group is read through the same machinery as the samples
        parameters = self[self.stats_group].keys()
        return self.read_samples(parameters, samples_group=self.stats_group,
                                 **kwargs)
    def read_acceptance_fraction(self, **kwargs):
        """Returns the acceptance fraction that was written to the file.
        Parameters
        ----------
        \**kwargs :
            All keyword arguments are passed to the sampler's
            `read_acceptance_fraction` function.
        Returns
        -------
        numpy.array
            The acceptance fraction.
        """
        return self.sampler_class.read_acceptance_fraction(self, **kwargs)
    def read_acls(self):
        """Returns all of the individual chains' acls. See the `read_acls`
        function of this file's sampler for more details.
        """
        return self.sampler_class.read_acls(self)
    def read_label(self, parameter, error_on_none=False):
        """Returns the label for the parameter.
        Parameters
        -----------
        parameter : str
            Name of parameter to get a label for. Will first try to retrieve
            a label from this file's "label" attributes. If the parameter
            is not found there, will look for a label from
            pycbc.waveform.parameters.
        error_on_none : {False, bool}
            If True, will raise a ValueError if a label cannot be found, or if
            the label is None. Otherwise, the parameter will just be returned
            if no label can be found.
        Returns
        -------
        label : str
            A formatted string for the name of the paramter.
        """
        # get label
        try:
            label = self[parameter].attrs["label"]
        except KeyError:
            # try looking in pycbc.waveform.parameters
            try:
                label = getattr(wfparams, parameter).label
            except AttributeError:
                label = None
        if label is None:
            if error_on_none:
                raise ValueError("Cannot find a label for paramter %s" %(
                    parameter))
            else:
                return parameter
        return label
    def read_random_state(self, group=None):
        """ Reads the state of the random number generator from the file.
        Parameters
        ----------
        group : str
            Name of group to read random state from.
        Returns
        -------
        tuple
            A tuple with 5 elements that can be passed to numpy.set_state.
        """
        group = self.sampler_group if group is None else group
        dataset_name = "/".join([group, "random_state"])
        # the key array is stored as the dataset; the rest of the state is
        # stored in the dataset's attrs (see write_random_state)
        arr = self[dataset_name][:]
        s = self[dataset_name].attrs["s"]
        pos = self[dataset_name].attrs["pos"]
        has_gauss = self[dataset_name].attrs["has_gauss"]
        cached_gauss = self[dataset_name].attrs["cached_gauss"]
        return s, arr, pos, has_gauss, cached_gauss
    def write_strain(self, strain_dict, group=None):
        """Writes strain for each IFO to file.
        Parameters
        -----------
        strain : {dict, FrequencySeries}
            A dict of FrequencySeries where the key is the IFO.
        group : {None, str}
            The group to write the strain to. If None, will write to the top
            level.
        """
        subgroup = "{ifo}/strain"
        if group is None:
            group = subgroup
        else:
            group = '/'.join([group, subgroup])
        for ifo,strain in strain_dict.items():
            self[group.format(ifo=ifo)] = strain
            self[group.format(ifo=ifo)].attrs['delta_t'] = strain.delta_t
            self[group.format(ifo=ifo)].attrs['start_time'] = \
                float(strain.start_time)
    def write_stilde(self, stilde_dict, group=None):
        """Writes stilde for each IFO to file.
        Parameters
        -----------
        stilde : {dict, FrequencySeries}
            A dict of FrequencySeries where the key is the IFO.
        group : {None, str}
            The group to write the strain to. If None, will write to the top
            level.
        """
        subgroup = "{ifo}/stilde"
        if group is None:
            group = subgroup
        else:
            group = '/'.join([group, subgroup])
        for ifo,stilde in stilde_dict.items():
            self[group.format(ifo=ifo)] = stilde
            self[group.format(ifo=ifo)].attrs['delta_f'] = stilde.delta_f
            self[group.format(ifo=ifo)].attrs['epoch'] = float(stilde.epoch)
    def write_psd(self, psds, low_frequency_cutoff, group=None):
        """Writes PSD for each IFO to file.
        Parameters
        -----------
        psds : {dict, FrequencySeries}
            A dict of FrequencySeries where the key is the IFO.
        low_frequency_cutoff : {dict, float}
            A dict of the low-frequency cutoff where the key is the IFO. The
            minimum value will be stored as an attr in the File.
        group : {None, str}
            The group to write the strain to. If None, will write to the top
            level.
        """
        subgroup = "{ifo}/psds/0"
        if group is None:
            group = subgroup
        else:
            group = '/'.join([group, subgroup])
        self.attrs["low_frequency_cutoff"] = min(low_frequency_cutoff.values())
        for ifo in psds:
            self[group.format(ifo=ifo)] = psds[ifo]
            self[group.format(ifo=ifo)].attrs['delta_f'] = psds[ifo].delta_f
    def write_data(self, strain_dict=None, stilde_dict=None,
                   psd_dict=None, low_frequency_cutoff_dict=None,
                   group=None):
        """Writes the strain/stilde/psd.
        Parameters
        ----------
        strain_dict : {None, dict}
            A dictionary of strains. If None, no strain will be written.
        stilde_dict : {None, dict}
            A dictionary of stilde. If None, no stilde will be written.
        psd_dict : {None, dict}
            A dictionary of psds. If None, no psds will be written.
        low_freuency_cutoff_dict : {None, dict}
            A dictionary of low frequency cutoffs used for each detector in
            `psd_dict`; must be provided if `psd_dict` is not None.
        group : {None, str}
            The group to write the strain to. If None, will write to the top
            level.
        """
        # save PSD
        if psd_dict is not None:
            if low_frequency_cutoff_dict is None:
                raise ValueError("must provide low_frequency_cutoff_dict if "
                                 "saving psds to output")
            # apply dynamic range factor for saving PSDs since
            # plotting code expects it
            psd_dyn_dict = {}
            for key,val in psd_dict.iteritems():
                psd_dyn_dict[key] = FrequencySeries(val*DYN_RANGE_FAC**2,
                                                    delta_f=val.delta_f)
            self.write_psd(psds=psd_dyn_dict,
                           low_frequency_cutoff=low_frequency_cutoff_dict,
                           group=group)
        # save stilde
        if stilde_dict is not None:
            self.write_stilde(stilde_dict, group=group)
        # save strain if desired
        if strain_dict is not None:
            self.write_strain(strain_dict, group=group)
    def write_injections(self, injection_file, ifo):
        """ Writes injection parameters for an IFO to file.
        Parameters
        ----------
        injection_file : str
            Path to HDF injection file.
        ifo : str
            IFO name.
        """
        subgroup = "{ifo}/injections"
        self.create_group(subgroup.format(ifo=ifo))
        try:
            with h5py.File(injection_file, "r") as fp:
                for param in fp.keys():
                    self[subgroup.format(ifo=ifo)][param] = fp[param][:]
                for key in fp.attrs.keys():
                    self[subgroup.format(ifo=ifo)].attrs[key] = fp.attrs[key]
        except IOError:
            # best-effort: a missing/unreadable injection file is not fatal
            logging.warn("Could not read %s as an HDF file", injection_file)
    def write_command_line(self):
        """Writes the command line that invoked the current program
        (``sys.argv``) to this file's ``cmd`` attribute. Takes no arguments.
        """
        self.attrs["cmd"] = " ".join(sys.argv)
    def write_random_state(self, group=None, state=None):
        """ Writes the state of the random number generator from the file.
        Parameters
        ----------
        group : str
            Name of group to read random state to.
        state : tuple, optional
            Specify the random state to write. If None, will use
            ``numpy.random.get_state()``.
        """
        group = self.sampler_group if group is None else group
        dataset_name = "/".join([group, "random_state"])
        if state is None:
            state = numpy.random.get_state()
        s, arr, pos, has_gauss, cached_gauss = state
        # NOTE(review): this existence check is on ``group``, not on the
        # dataset itself; if the group exists without a "random_state"
        # dataset, the in-place assignment raises KeyError — confirm callers
        # always create both together.
        if group in self:
            self[dataset_name][:] = arr
        else:
            self.create_dataset(dataset_name, arr.shape, fletcher32=True)
            self[dataset_name][:] = arr
        self[dataset_name].attrs["s"] = s
        self[dataset_name].attrs["pos"] = pos
        self[dataset_name].attrs["has_gauss"] = has_gauss
        self[dataset_name].attrs["cached_gauss"] = cached_gauss
    def get_slice(self, thin_start=None, thin_interval=None, thin_end=None):
        """Formats a slice using the given arguments that can be used to
        retrieve a thinned array from an InferenceFile.
        Parameters
        ----------
        thin_start : {None, int}
            The starting index to use. If None, will try to retrieve the
            `burn_in_iterations` from the given file. If no
            `burn_in_iterations` exists, will default to the start of the
            array.
        thin_interval : {None, int}
            The interval to use. If None, will try to retrieve the acl from the
            given file. If no acl attribute exists, will default to 1.
        thin_end : {None, int}
            The end index to use. If None, will retrieve to the end of the
            array.
        Returns
        -------
        slice :
            The slice needed.
        """
        # default is to skip burn in samples
        if thin_start is None:
            try:
                thin_start = self.burn_in_iterations
                # if the sampler hasn't burned in, the burn_in_iterations will
                # be the same as the number of iterations, which would result
                # in 0 samples. In that case, just use the last one
                if thin_start == self.niterations:
                    thin_start = thin_start - 1
            except KeyError:
                pass
        # default is to use stored ACL and accept every i-th sample
        if thin_interval is None:
            try:
                thin_interval = int(numpy.ceil(self.acl))
            except KeyError:
                pass
        return slice(thin_start, thin_end, thin_interval)
    def copy_metadata(self, other):
        """Copies all metadata from this file to the other file.
        Metadata is defined as all data that is not in either the samples or
        stats group.
        Parameters
        ----------
        other : InferenceFile
            An open inference file to write the data to.
        """
        logging.info("Copying metadata")
        # copy non-samples/stats data
        for key in self.keys():
            if key not in [self.samples_group, self.stats_group]:
                super(InferenceFile, self).copy(key, other)
        # copy attributes
        for key in self.attrs.keys():
            other.attrs[key] = self.attrs[key]
    def copy(self, other, parameters=None, parameter_names=None,
             posterior_only=False, **kwargs):
        """Copies data in this file to another file.
        The samples and stats to copy may be down selected using the given
        kwargs. All other data (the "metadata") are copied exactly.
        Parameters
        ----------
        other : str or InferenceFile
            The file to write to. May be either a string giving a filename,
            or an open hdf file. If the former, the file will be opened with
            the write attribute (note that if a file already exists with that
            name, it will be deleted).
        parameters : list of str, optional
            List of parameters to copy. If None, will copy all parameters.
        parameter_names : dict, optional
            Rename one or more parameters to the given name. The dictionary
            should map parameter -> parameter name. If None, will just use the
            original parameter names.
        posterior_only : bool, optional
            Write the samples and likelihood stats as flattened arrays, and
            set other's posterior_only attribute. For example, if this file
            has a parameter's samples written to
            `{samples_group}/{param}/walker{x}`, then other will have all of
            the selected samples from all walkers written to
            `{samples_group}/{param}/`.
        \**kwargs :
            All other keyword arguments are passed to `read_samples`.
        Returns
        -------
        InferenceFile
            The open file handler to other.
        """
        if not isinstance(other, h5py.File):
            # check that we're not trying to overwrite this file
            # NOTE(review): ``self.name`` resolves to the class attribute
            # "hdf" (see top of class), not this file's path, so this guard
            # may never trigger for real paths — verify intent.
            if other == self.name:
                raise IOError("destination is the same as this file")
            other = InferenceFile(other, 'w')
        # copy metadata over
        self.copy_metadata(other)
        # update other's posterior attribute
        if posterior_only:
            other.attrs['posterior_only'] = posterior_only
        # select the samples to copy
        logging.info("Reading samples to copy")
        if parameters is None:
            parameters = self.variable_args
        # if list of desired parameters is different, rename variable args
        if set(parameters) != set(self.variable_args):
            other.attrs['variable_args'] = parameters
        # if only the posterior is desired, we'll flatten the results
        if not posterior_only and not self.posterior_only:
            kwargs['flatten'] = False
        samples = self.read_samples(parameters, **kwargs)
        logging.info("Copying {} samples".format(samples.size))
        # if different parameter names are desired, get them from the samples
        if parameter_names:
            arrs = {pname: samples[p] for p,pname in parameter_names.items()}
            arrs.update({p: samples[p] for p in parameters
                         if p not in parameter_names})
            samples = FieldArray.from_kwargs(**arrs)
            other.attrs['variable_args'] = samples.fieldnames
        logging.info("Writing samples")
        other.samples_parser.write_samples_group(other, self.samples_group,
                                                 samples.fieldnames, samples)
        # do the same for the likelihood stats
        logging.info("Reading stats to copy")
        stats = self.read_likelihood_stats(**kwargs)
        logging.info("Writing stats")
        other.samples_parser.write_samples_group(other, self.stats_group,
                                                 stats.fieldnames, stats)
        # if any down selection was done, re-set the burn in iterations and
        # the acl, and the niterations.
        # The last dimension of the samples returned by the sampler should
        # be the number of iterations.
        if samples.shape[-1] != self.niterations:
            other.attrs['acl'] = 1
            other.attrs['burn_in_iterations'] = 0
            other.attrs['niterations'] = samples.shape[-1]
        return other
def check_integrity(filename):
    """Performs basic sanity checks on an InferenceFile.

    The checks, in order:
      * the file exists and can be opened;
      * every dataset in the samples group has the same shape;
      * the first and last sample of every dataset can be read.

    Parameters
    ----------
    filename: str
        Name of an InferenceFile to check.

    Raises
    ------
    ValueError
        If the given file does not exist.
    KeyError
        If the samples group does not exist.
    IOError
        If any of the checks fail.
    """
    # a missing file is a usage error rather than a corruption error
    if not os.path.exists(filename):
        raise ValueError("file {} does not exist".format(filename))
    # a file corrupted badly enough that it cannot be opened raises an
    # IOError from the constructor below
    with InferenceFile(filename, 'r') as fp:
        group = fp.samples_group + '/{}'
        parameters = fp[fp.samples_group].keys()
        # every dataset must match the shape of the first one
        reference_shape = fp[group.format(parameters[0])].shape
        for param in parameters:
            if fp[group.format(param)].shape != reference_shape:
                raise IOError("not all datasets in the samples group have the same "
                              "shape")
        # spot-check that the first and last sample of each dataset is readable
        first_index = tuple(0 for _ in reference_shape)
        last_index = tuple(-1 for _ in reference_shape)
        for param in parameters:
            _ = fp[group.format(param)][first_index]
            _ = fp[group.format(param)][last_index]
| gpl-3.0 |
sandymanu/sandy_sambar_8994 | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
# Per-event-name counters for events that have no dedicated handler.
unhandled = autodict()
# Called by perf once before any events are processed.
def trace_begin():
	print "trace_begin"
	pass
# Called by perf after the last event; reports any unhandled event counts.
def trace_end():
	print_unhandled()
# Handler for irq:softirq_entry events; decodes 'vec' to its symbolic name
# (exercises the symbol_str() callback into perf).
def irq__softirq_entry(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	vec):
		print_header(event_name, common_cpu, common_secs, common_nsecs,
			common_pid, common_comm)
		print_uncommon(context)
		print "vec=%s\n" % \
		(symbol_str("irq__softirq_entry", "vec", vec)),
# Handler for kmem:kmalloc events; decodes 'gfp_flags' to flag names
# (exercises the flag_str() callback into perf).
def kmem__kmalloc(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	call_site, ptr, bytes_req, bytes_alloc,
	gfp_flags):
		print_header(event_name, common_cpu, common_secs, common_nsecs,
			common_pid, common_comm)
		print_uncommon(context)
		print "call_site=%u, ptr=%u, bytes_req=%u, " \
		"bytes_alloc=%u, gfp_flags=%s\n" % \
		(call_site, ptr, bytes_req, bytes_alloc,
		flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
# Fallback handler: counts occurrences of events that have no handler above.
def trace_unhandled(event_name, context, event_fields_dict):
    try:
        unhandled[event_name] += 1
    except TypeError:
        # presumably a first access: autodict auto-creates an empty dict for a
        # new key, on which += 1 raises TypeError — TODO confirm against Core
        unhandled[event_name] = 1
# Prints the common event fields shared by every trace record.
def print_header(event_name, cpu, secs, nsecs, pid, comm):
	print "%-20s %5u %05u.%09u %8u %-20s " % \
	(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
               common_lock_depth(context))
# Prints a two-column table of event name -> count for unhandled events.
def print_unhandled():
    keys = unhandled.keys()
    if not keys:
        return
    print "\nunhandled events:\n\n",
    print "%-40s  %10s\n" % ("event", "count"),
    print "%-40s  %10s\n" % ("----------------------------------------", \
                                 "-----------"),
    for event_name in keys:
	print "%-40s  %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
tedder/ansible | lib/ansible/plugins/lookup/onepassword.py | 23 | 9110 | # -*- coding: utf-8 -*-
# Copyright: (c) 2018, Scott Buchanan <sbuchanan@ri.pn>
# Copyright: (c) 2016, Andrew Zenk <azenk@umn.edu> (lastpass.py used as starting point)
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Ansible module metadata and plugin documentation, parsed by ansible-doc.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

DOCUMENTATION = """
    lookup: onepassword
    author:
      - Scott Buchanan (@scottsb)
      - Andrew Zenk (@azenk)
      - Sam Doran (@samdoran)
    version_added: "2.6"
    requirements:
      - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
    short_description: fetch field values from 1Password
    description:
      - C(onepassword) wraps the C(op) command line utility to fetch specific field values from 1Password.
    options:
      _terms:
        description: identifier(s) (UUID, name, or subdomain; case-insensitive) of item(s) to retrieve.
        required: True
      field:
        description: field to return from each matching item (case-insensitive).
        default: 'password'
      master_password:
        description: The password used to unlock the specified vault.
        default: None
        version_added: '2.7'
        aliases: ['vault_password']
      section:
        description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section.
        default: None
      subdomain:
        description: The 1Password subdomain to authenticate against.
        default: None
        version_added: '2.7'
      username:
        description: The username used to sign in.
        version_added: '2.7'
      secret_key:
        description: The secret key used when performing an initial sign in.
        version_added: '2.7'
      vault:
        description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults.
        default: None
    notes:
      - This lookup will use an existing 1Password session if one exists. If not, and you have already
        performed an initial sign in (meaning C(~/.op/config exists)), then only the C(master_password) is required.
        You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op).
      - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password).
      - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials
        needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength
        to the 1Password master password.
      - This lookup stores potentially sensitive data from 1Password as Ansible facts.
        Facts are subject to caching if enabled, which means this data could be stored in clear text
        on disk or in a database.
      - Tested with C(op) version 0.5.3
"""

# Fix: the two "full sign in" examples below were missing the commas between
# the lookup() arguments, which is invalid Jinja2 call syntax.
EXAMPLES = """
# These examples only work when already signed in to 1Password
- name: Retrieve password for KITT when already signed in to 1Password
  debug:
    var: lookup('onepassword', 'KITT')

- name: Retrieve password for Wintermute when already signed in to 1Password
  debug:
    var: lookup('onepassword', 'Tessier-Ashpool', section='Wintermute')

- name: Retrieve username for HAL when already signed in to 1Password
  debug:
    var: lookup('onepassword', 'HAL 9000', field='username', vault='Discovery')

- name: Retrieve password for HAL when not signed in to 1Password
  debug:
    var: lookup('onepassword',
                'HAL 9000',
                subdomain='Discovery',
                master_password=vault_master_password)

- name: Retrieve password for HAL when never signed in to 1Password
  debug:
    var: lookup('onepassword',
                'HAL 9000',
                subdomain='Discovery',
                master_password=vault_master_password,
                username='tweety@acme.com',
                secret_key=vault_secret_key)
"""

RETURN = """
  _raw:
    description: field data requested
"""
import errno
import json
import os
from subprocess import Popen, PIPE
from ansible.plugins.lookup import LookupBase
from ansible.errors import AnsibleLookupError
from ansible.module_utils._text import to_bytes, to_text
class OnePass(object):
    """Wrapper around the 1Password ``op`` CLI: handles signing in (reusing
    an existing session where possible) and retrieving item fields.
    """
    def __init__(self, path='op'):
        # path to the `op` executable
        self.cli_path = path
        # created by a previous interactive `op signin`; its presence means
        # an initial (full) sign-in has already been performed
        self.config_file_path = os.path.expanduser('~/.op/config')
        self.logged_in = False
        self.token = None
        self.subdomain = None
        self.username = None
        self.secret_key = None
        self.master_password = None
    def get_token(self):
        """Obtain a session token, preferring a basic re-sign-in when an
        initial sign in has already happened; falls back to a full sign in.
        """
        # If the config file exists, assume an initial signin has taken place and try basic sign in
        if os.path.isfile(self.config_file_path):
            if not self.master_password:
                raise AnsibleLookupError('Unable to sign in to 1Password. master_password is required.')
            try:
                # --output=raw makes `op` print just the session token
                args = ['signin', '--output=raw']
                if self.subdomain:
                    args = ['signin', self.subdomain, '--output=raw']
                rc, out, err = self._run(args, command_input=to_bytes(self.master_password))
                self.token = out.strip()
            except AnsibleLookupError:
                self.full_login()
        else:
            # Attempt a full sign in since there appears to be no existing sign in
            self.full_login()
    def assert_logged_in(self):
        """Ensure there is a usable session: reuse an existing `op` session
        if one is active, otherwise acquire a token.
        """
        try:
            rc, out, err = self._run(['get', 'account'], ignore_errors=True)
            if rc == 0:
                self.logged_in = True
            if not self.logged_in:
                self.get_token()
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise AnsibleLookupError("1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
            raise e
    def get_raw(self, item_id, vault=None):
        """Return the raw JSON for an item, optionally restricted to a vault."""
        args = ["get", "item", item_id]
        if vault is not None:
            args += ['--vault={0}'.format(vault)]
        if not self.logged_in:
            # session token was obtained by this process (get_token); pass it
            # explicitly since there is no ambient `op` session to reuse
            args += [to_bytes('--session=') + self.token]
        rc, output, dummy = self._run(args)
        return output
    def get_field(self, item_id, field, section=None, vault=None):
        """Return a single field value from an item ('' if the item is empty)."""
        output = self.get_raw(item_id, vault)
        return self._parse_field(output, field, section) if output != '' else ''
    def full_login(self):
        """Perform an initial sign in, which requires the full credential set."""
        if None in [self.subdomain, self.username, self.secret_key, self.master_password]:
            raise AnsibleLookupError('Unable to perform initial sign in to 1Password. '
                                     'subdomain, username, secret_key, and master_password are required to perform initial sign in.')
        args = [
            'signin',
            '{0}.1password.com'.format(self.subdomain),
            to_bytes(self.username),
            to_bytes(self.secret_key),
            '--output=raw',
        ]
        rc, out, err = self._run(args, command_input=to_bytes(self.master_password))
        self.token = out.strip()
    def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
        """Run the `op` CLI; returns (rc, stdout, stderr). Raises
        AnsibleLookupError on unexpected rc unless ignore_errors is set.
        """
        command = [self.cli_path] + args
        p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        out, err = p.communicate(input=command_input)
        rc = p.wait()
        if not ignore_errors and rc != expected_rc:
            raise AnsibleLookupError(to_text(err))
        return rc, out, err
    def _parse_field(self, data_json, field_name, section_title=None):
        """Extract a field value from `op get item` JSON output.
        Top-level fields use 'name'/'value' keys; fields inside sections use
        the abbreviated 't' (title) and 'v' (value) keys. Returns '' when the
        field is not found.
        """
        data = json.loads(data_json)
        if section_title is None:
            for field_data in data['details'].get('fields', []):
                if field_data.get('name').lower() == field_name.lower():
                    return field_data.get('value', '')
        for section_data in data['details'].get('sections', []):
            if section_title is not None and section_title.lower() != section_data['title'].lower():
                continue
            for field_data in section_data.get('fields', []):
                if field_data.get('t').lower() == field_name.lower():
                    return field_data.get('v', '')
        return ''
class LookupModule(LookupBase):
    """Lookup plugin entry point: resolves each term to a 1Password field
    value via the ``op`` CLI wrapper."""

    def run(self, terms, variables=None, **kwargs):
        op = OnePass()
        # credentials for (re-)signing in, if a session is not already active
        op.subdomain = kwargs.get('subdomain')
        op.username = kwargs.get('username')
        op.secret_key = kwargs.get('secret_key')
        op.master_password = kwargs.get('master_password', kwargs.get('vault_password'))
        # what to fetch, and from where
        field = kwargs.get('field', 'password')
        section = kwargs.get('section')
        vault = kwargs.get('vault')
        op.assert_logged_in()
        return [op.get_field(term, field, section, vault) for term in terms]
| gpl-3.0 |
tianocore/buildtools-BaseTools | Source/Python/UPT/Parser/InfDefineSectionParser.py | 6 | 7396 | ## @file
# This file contained the parser for define sections in INF file
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
InfDefineSectionParser
'''
##
# Import Modules
#
import re
from Library import DataType as DT
from Library import GlobalData
from Library.Parsing import MacroParser
from Library.Misc import GetSplitValueList
from Library.ParserValidate import IsValidArch
from Object.Parser.InfCommonObject import InfLineCommentObject
from Object.Parser.InfDefineObject import InfDefMember
from Parser.InfParserMisc import InfExpandMacro
from Object.Parser.InfMisc import ErrorInInf
from Logger import StringTable as ST
from Parser.InfParserMisc import InfParserSectionRoot
## __GetValidateArchList
#
#
def GetValidateArchList(LineContent):
    """Extract the architecture list from a VALID_ARCHITECTURES comment line.

    Returns an empty list when the line is not a VALID_ARCHITECTURES comment;
    otherwise returns only the names accepted by IsValidArch().
    """
    ArchList = []
    ValidArchPattern = re.compile(r"^\s*#\s*VALID_ARCHITECTURES\s*=\s*.*$", re.DOTALL)
    if ValidArchPattern.match(LineContent):
        TempArch = GetSplitValueList(LineContent, DT.TAB_EQUAL_SPLIT, 1)[1]
        # Drop any trailing parenthesised remark after the arch names.
        TempArch = GetSplitValueList(TempArch, '(', 1)[0]
        # Raw string for the split pattern: '\s' in a plain literal is an
        # invalid escape sequence (DeprecationWarning on Python >= 3.6).
        ArchList = [Arch for Arch in re.split(r'\s+', TempArch) if IsValidArch(Arch)]
    return ArchList
class InfDefinSectionParser(InfParserSectionRoot):
    """Parser for the [Defines] section of an INF file."""
    def InfDefineParser(self, SectionString, InfSectionObject, FileName, SectionComment):
        """Parse *SectionString* (list of (content, line-no) pairs) and fill
        *InfSectionObject* with InfDefMember objects plus the raw section text.

        *SectionComment* is currently unused.
        """
        if SectionComment:
            pass
        #
        # Parser Defines section content and fill self._ContentList dict.
        #
        StillCommentFalg = False
        HeaderComments = []
        SectionContent = ''
        ArchList = []
        _ContentList = []
        _ValueList = []
        #
        # Add WORKSPACE to global Marco dict.
        #
        self.FileLocalMacros['WORKSPACE'] = GlobalData.gWORKSPACE
        for Line in SectionString:
            LineContent = Line[0]
            LineNo = Line[1]
            TailComments = ''
            LineComment = None
            # LineInfo = [file name, line number, line content] for error reporting.
            LineInfo = ['', -1, '']
            LineInfo[0] = FileName
            LineInfo[1] = LineNo
            LineInfo[2] = LineContent
            if LineContent.strip() == '':
                continue
            #
            # The first time encountered VALIDATE_ARCHITECHERS will be considered as support arch list.
            #
            if not ArchList:
                ArchList = GetValidateArchList(LineContent)
            #
            # Parser Comment
            #
            if LineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
                #
                # Last line is comments, and this line go on.
                #
                if StillCommentFalg:
                    HeaderComments.append(Line)
                    SectionContent += LineContent + DT.END_OF_LINE
                    continue
                #
                # First time encounter comment
                #
                else:
                    #
                    # Clear original data
                    #
                    HeaderComments = []
                    HeaderComments.append(Line)
                    StillCommentFalg = True
                    SectionContent += LineContent + DT.END_OF_LINE
                    continue
            else:
                StillCommentFalg = False
            # Accumulated comment lines become the header comment of the next item.
            if len(HeaderComments) >= 1:
                LineComment = InfLineCommentObject()
                LineCommentContent = ''
                for Item in HeaderComments:
                    LineCommentContent += Item[0] + DT.END_OF_LINE
                LineComment.SetHeaderComments(LineCommentContent)
            #
            # Find Tail comment.
            #
            if LineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
                TailComments = LineContent[LineContent.find(DT.TAB_COMMENT_SPLIT):]
                LineContent = LineContent[:LineContent.find(DT.TAB_COMMENT_SPLIT)]
                if LineComment == None:
                    LineComment = InfLineCommentObject()
                LineComment.SetTailComments(TailComments)
            #
            # Find Macro
            #
            Name, Value = MacroParser((LineContent, LineNo),
                                      FileName,
                                      DT.MODEL_META_DATA_HEADER,
                                      self.FileLocalMacros)
            if Name != None:
                # Macro definitions are recorded, not emitted as define items.
                self.FileLocalMacros[Name] = Value
                continue
            #
            # Replace with [Defines] section Macro
            #
            LineContent = InfExpandMacro(LineContent,
                                         (FileName, LineContent, LineNo),
                                         self.FileLocalMacros,
                                         None, True)
            SectionContent += LineContent + DT.END_OF_LINE
            # Each remaining line must be NAME = VALUE.
            TokenList = GetSplitValueList(LineContent, DT.TAB_EQUAL_SPLIT, 1)
            if len(TokenList) < 2:
                ErrorInInf(ST.ERR_INF_PARSER_DEFINE_ITEM_NO_VALUE,
                           LineInfo=LineInfo)
            _ValueList[0:len(TokenList)] = TokenList
            if not _ValueList[0]:
                ErrorInInf(ST.ERR_INF_PARSER_DEFINE_ITEM_NO_NAME,
                           LineInfo=LineInfo)
            if not _ValueList[1]:
                ErrorInInf(ST.ERR_INF_PARSER_DEFINE_ITEM_NO_VALUE,
                           LineInfo=LineInfo)
            Name, Value = _ValueList[0], _ValueList[1]
            InfDefMemberObj = InfDefMember(Name, Value)
            if (LineComment != None):
                InfDefMemberObj.Comments.SetHeaderComments(LineComment.GetHeaderComments())
                InfDefMemberObj.Comments.SetTailComments(LineComment.GetTailComments())
            InfDefMemberObj.CurrentLine.SetFileName(self.FullPath)
            InfDefMemberObj.CurrentLine.SetLineString(LineContent)
            InfDefMemberObj.CurrentLine.SetLineNo(LineNo)
            _ContentList.append(InfDefMemberObj)
            # Reset per-item comment accumulators.
            HeaderComments = []
            TailComments = ''
        #
        # Current Define section archs
        #
        if not ArchList:
            ArchList = ['COMMON']
        InfSectionObject.SetAllContent(SectionContent)
        InfSectionObject.SetDefines(_ContentList, Arch=ArchList)
| bsd-2-clause |
kenshay/ImageScript | Script_Runner/PYTHON/Lib/site-packages/bcrypt/__about__.py | 4 | 1296 | # Author:: Donald Stufft (<donald@stufft.io>)
# Copyright:: Copyright (c) 2013 Donald Stufft
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
# Package metadata; kept in one place so setup.py and the package itself can
# share a single source of truth.
__all__ = [
    "__title__", "__summary__", "__uri__", "__version__", "__author__",
    "__email__", "__license__", "__copyright__",
]
__title__ = "bcrypt"
__summary__ = "Modern password hashing for your software and your servers"
__uri__ = "https://github.com/pyca/bcrypt/"
__version__ = "3.1.4"
__author__ = "The Python Cryptographic Authority developers"
__email__ = "cryptography-dev@python.org"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2013-2016 {0}".format(__author__)
| gpl-3.0 |
LiaoPan/scikit-learn | sklearn/utils/linear_assignment_.py | 214 | 9413 | """
Solve the unique lowest-cost assignment problem using the
Hungarian algorithm (also known as Munkres algorithm).
"""
# Based on original code by Brain Clapper, adapted to NumPy by Gael Varoquaux.
# Heavily refactored by Lars Buitinck.
# Copyright (c) 2008 Brian M. Clapper <bmc@clapper.org>, Gael Varoquaux
# Author: Brian M. Clapper, Gael Varoquaux
# LICENSE: BSD
import numpy as np
from .fixes import astype
def linear_assignment(X):
    """Solve the linear assignment problem using the Hungarian algorithm.

    The problem is also known as maximum weight matching in bipartite
    graphs; the method is also known as the Munkres or Kuhn-Munkres
    algorithm.

    Parameters
    ----------
    X : array
        The cost matrix of the bipartite graph.

    Returns
    -------
    indices : array
        The pairs of (row, col) indices in the original array giving
        the original ordering.

    References
    ----------
    Harold W. Kuhn. The Hungarian Method for the assignment problem.
    *Naval Research Logistics Quarterly*, 2:83-97, 1955.
    """
    pairs = sorted(_hungarian(X).tolist())
    # Force an int dtype (matters for an empty result) and a fixed
    # two-column 2-D shape even when there are zero assignments.
    pairs = np.array(pairs, dtype=int)
    pairs.shape = (-1, 2)
    return pairs
class _HungarianState(object):
    """State of one execution of the Hungarian algorithm.

    Parameters
    ----------
    cost_matrix : 2D matrix
        The cost matrix. Does not have to be square.
    """

    def __init__(self, cost_matrix):
        cost_matrix = np.atleast_2d(cost_matrix)

        # If there are more rows (n) than columns (m), then the algorithm
        # will not be able to work correctly. Therefore, we
        # transpose the cost function when needed. Just have to
        # remember to swap the result columns back later.
        transposed = (cost_matrix.shape[1] < cost_matrix.shape[0])
        if transposed:
            self.C = (cost_matrix.T).copy()
        else:
            self.C = cost_matrix.copy()
        self.transposed = transposed

        # At this point, m >= n.
        n, m = self.C.shape
        # NOTE(review): np.bool is deprecated/removed in modern NumPy;
        # plain `bool` would be the drop-in replacement — verify target version.
        self.row_uncovered = np.ones(n, dtype=np.bool)
        self.col_uncovered = np.ones(m, dtype=np.bool)
        self.Z0_r = 0
        self.Z0_c = 0
        self.path = np.zeros((n + m, 2), dtype=int)
        # marked[i, j]: 0 = unmarked, 1 = starred zero, 2 = primed zero.
        self.marked = np.zeros((n, m), dtype=int)

    def _find_prime_in_row(self, row):
        """
        Find the first prime element in the specified row. Returns
        the column index, or -1 if no starred element was found.
        """
        col = np.argmax(self.marked[row] == 2)
        if self.marked[row, col] != 2:
            col = -1
        return col

    def _clear_covers(self):
        """Clear all covered matrix cells"""
        self.row_uncovered[:] = True
        self.col_uncovered[:] = True
def _hungarian(cost_matrix):
    """The Hungarian algorithm.

    Calculate the Munkres solution to the classical assignment problem and
    return the indices for the lowest-cost pairings.

    Parameters
    ----------
    cost_matrix : 2D matrix
        The cost matrix. Does not have to be square.

    Returns
    -------
    indices : 2D array of indices
        The pairs of (row, col) indices in the original array giving
        the original ordering.
    """
    state = _HungarianState(cost_matrix)

    # No need to bother with assignments if one of the dimensions
    # of the cost matrix is zero-length.
    step = None if 0 in cost_matrix.shape else _step1

    # Drive the state machine: each step returns the next step function,
    # or None once the assignment is complete.
    while step is not None:
        step = step(state)

    # Look for the starred columns
    results = np.array(np.where(state.marked == 1)).T

    # We need to swap the columns because we originally
    # did a transpose on the input cost matrix.
    if state.transposed:
        results = results[:, ::-1]

    return results
# Individual steps of the algorithm follow, as a state machine: they return
# the next step to be taken (function to be called), if any.
def _step1(state):
    """Row-reduce the cost matrix and star an initial set of zeros.

    Covers steps 1 and 2 of the classical (Wikipedia) description: subtract
    each row's minimum, then star every zero that has no starred zero in its
    row or column yet.  Always continues with step 3.
    """
    state.C -= state.C.min(axis=1)[:, np.newaxis]
    for i, j in zip(*np.where(state.C == 0)):
        if state.row_uncovered[i] and state.col_uncovered[j]:
            state.marked[i, j] = 1
            state.row_uncovered[i] = False
            state.col_uncovered[j] = False
    state._clear_covers()
    return _step3
def _step3(state):
    """Cover each column containing a starred zero.

    When every row is matched (one star per row) the starred zeros describe
    a complete assignment and None is returned to stop the state machine;
    otherwise proceed to step 4.
    """
    starred = state.marked == 1
    state.col_uncovered[starred.any(axis=0)] = False
    if starred.sum() < state.C.shape[0]:
        return _step4
    return None
def _step4(state):
    """
    Find a noncovered zero and prime it. If there is no starred zero
    in the row containing this primed zero, Go to Step 5. Otherwise,
    cover this row and uncover the column containing the starred
    zero. Continue in this manner until there are no uncovered zeros
    left. Save the smallest uncovered value and Go to Step 6.
    """
    # We convert to int as numpy operations are faster on int
    C = (state.C == 0).astype(np.int)
    # covered_C masks the zero matrix down to currently uncovered cells.
    covered_C = C * state.row_uncovered[:, np.newaxis]
    covered_C *= astype(state.col_uncovered, dtype=np.int, copy=False)
    n = state.C.shape[0]
    m = state.C.shape[1]
    while True:
        # Find an uncovered zero
        row, col = np.unravel_index(np.argmax(covered_C), (n, m))
        if covered_C[row, col] == 0:
            # No uncovered zero remains: adjust the matrix in step 6.
            return _step6
        else:
            # Prime the uncovered zero.
            state.marked[row, col] = 2
            # Find the first starred element in the row
            star_col = np.argmax(state.marked[row] == 1)
            if not state.marked[row, star_col] == 1:
                # Could not find one
                state.Z0_r = row
                state.Z0_c = col
                return _step5
            else:
                # Cover this row, uncover the star's column, and refresh
                # the uncovered-zero mask incrementally.
                col = star_col
                state.row_uncovered[row] = False
                state.col_uncovered[col] = True
                covered_C[:, col] = C[:, col] * (
                    astype(state.row_uncovered, dtype=np.int, copy=False))
                covered_C[row] = 0
def _step5(state):
    """
    Construct a series of alternating primed and starred zeros as follows.
    Let Z0 represent the uncovered primed zero found in Step 4.
    Let Z1 denote the starred zero in the column of Z0 (if any).
    Let Z2 denote the primed zero in the row of Z1 (there will always be one).
    Continue until the series terminates at a primed zero that has no starred
    zero in its column. Unstar each starred zero of the series, star each
    primed zero of the series, erase all primes and uncover every line in the
    matrix. Return to Step 3
    """
    count = 0
    path = state.path
    path[count, 0] = state.Z0_r
    path[count, 1] = state.Z0_c

    while True:
        # Find the first starred element in the col defined by
        # the path.
        row = np.argmax(state.marked[:, path[count, 1]] == 1)
        if not state.marked[row, path[count, 1]] == 1:
            # Could not find one
            break
        else:
            count += 1
            path[count, 0] = row
            path[count, 1] = path[count - 1, 1]

        # Find the first prime element in the row defined by the
        # first path step
        col = np.argmax(state.marked[path[count, 0]] == 2)
        if state.marked[row, col] != 2:
            col = -1
        count += 1
        path[count, 0] = path[count - 1, 0]
        path[count, 1] = col

    # Convert paths
    for i in range(count + 1):
        if state.marked[path[i, 0], path[i, 1]] == 1:
            # Unstar each starred zero of the series...
            state.marked[path[i, 0], path[i, 1]] = 0
        else:
            # ...and star each primed zero of the series.
            state.marked[path[i, 0], path[i, 1]] = 1

    state._clear_covers()
    # Erase all prime markings
    state.marked[state.marked == 2] = 0
    return _step3
def _step6(state):
    """Adjust the cost matrix by the smallest uncovered value.

    Add that value to every element of each covered row and subtract it from
    every element of each uncovered column, then return to step 4 without
    altering any stars, primes, or covers.
    """
    if state.row_uncovered.any() and state.col_uncovered.any():
        # Minimum over the uncovered submatrix (equivalent to the original
        # column-wise then row-wise two-stage minimum).
        minval = state.C[state.row_uncovered][:, state.col_uncovered].min()
        state.C[~state.row_uncovered] += minval
        state.C[:, state.col_uncovered] -= minval
    return _step4
| bsd-3-clause |
AlphaSmartDog/DeepLearningNotes | Note-1 RNN-DNC择时/Note-1-2 PonderingDNCore L2正则化示例/sonnet/python/modules/experimental.py | 10 | 1343 | # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module for experimental sonnet functions and classes.
This file contains functions and classes that are being tested until they're
either removed or promoted into the wider sonnet library.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from sonnet.python.modules import util
from tensorflow.python.util import deprecation
@deprecation.deprecated(
    "2017-08-01",
    "The @snt.experimental.reuse_vars decorator has been moved to "
    "@snt.reuse_variables. Please change to use the new location. ")
def reuse_vars(method):
    """Deprecated alias for `snt.reuse_variables`; forwards *method* unchanged."""
    return util.reuse_variables(method)
| mit |
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/numpy/linalg/linalg.py | 3 | 82838 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import operator
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,
finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
atleast_2d, intp, asanyarray, object_, matmul,
swapaxes, divide, count_nonzero, isnan
)
from numpy.core.multiarray import normalize_axis_index
from numpy.lib.twodim_base import triu, eye
from numpy.linalg import lapack_lite, _umath_linalg
# For Python2/3 compatibility
# Single-character LAPACK job codes, kept as bytes (not str) so they can be
# handed to the low-level wrappers unchanged on both Python versions.
_N = b'N'
_V = b'V'
_A = b'A'
_S = b'S'
_L = b'L'

# LAPACK's Fortran INTEGER corresponds to C int.
fortran_int = intc
# Error object
class LinAlgError(Exception):
    """
    Generic Python-exception-derived object raised by linalg functions.

    General purpose exception class, derived from Python's Exception class,
    raised programmatically in linalg functions when a linear-algebra-related
    condition would prevent further correct execution of the function.

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> LA.inv(np.zeros((2,2)))
    Traceback (most recent call last):
      ...
    numpy.linalg.LinAlgError: Singular matrix

    """
def _determine_error_states():
    """Build the default extobj template ``[bufsize, errmask, callback]``.

    The error mask is captured under an errstate where 'invalid' triggers a
    callback and all other FP errors are ignored; the callback slot is left
    as None and filled in later by get_linalg_error_extobj.
    """
    errobj = geterrobj()
    bufsize = errobj[0]

    with errstate(invalid='call', over='ignore',
                  divide='ignore', under='ignore'):
        invalid_call_errmask = geterrobj()[1]

    return [bufsize, invalid_call_errmask, None]

# Dealing with errors in _umath_linalg
_linalg_error_extobj = _determine_error_states()
# The helper is only needed once at import time.
del _determine_error_states
# Error callbacks installed via get_linalg_error_extobj; each converts the
# low-level ufunc error signal into a descriptive LinAlgError.
def _raise_linalgerror_singular(err, flag):
    raise LinAlgError("Singular matrix")

def _raise_linalgerror_nonposdef(err, flag):
    raise LinAlgError("Matrix is not positive definite")

def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
    raise LinAlgError("Eigenvalues did not converge")

def _raise_linalgerror_svd_nonconvergence(err, flag):
    raise LinAlgError("SVD did not converge")

def _raise_linalgerror_lstsq(err, flag):
    raise LinAlgError("SVD did not converge in Linear Least Squares")
def get_linalg_error_extobj(callback):
    """Return a fresh extobj list whose error-callback slot is *callback*."""
    # Build from a slice of the module-level template so callers can never
    # mutate the shared default.
    return _linalg_error_extobj[:2] + [callback]
def _makearray(a):
    # Convert input to ndarray and return (array, wrap), where `wrap`
    # restores the caller's array subclass (e.g. np.matrix) on the way out.
    new = asarray(a)
    wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
    return new, wrap
def isComplexType(t):
    """Return True when scalar type *t* is a complex floating-point type."""
    return issubclass(t, complexfloating)
# Map each supported inexact scalar type to its real counterpart...
_real_types_map = {single : single,
                   double : double,
                   csingle : single,
                   cdouble : double}

# ...and to its complex counterpart.
_complex_types_map = {single : csingle,
                      double : cdouble,
                      csingle : csingle,
                      cdouble : cdouble}
def _realType(t, default=double):
    # Real counterpart of scalar type t, or `default` for unsupported types.
    return _real_types_map.get(t, default)

def _complexType(t, default=cdouble):
    # Complex counterpart of scalar type t, or `default` for unsupported types.
    return _complex_types_map.get(t, default)

def _linalgRealType(t):
    """Cast the type t to either double or cdouble."""
    return double

# NOTE(review): this re-assignment of _complex_types_map repeats the identical
# literal defined above — presumably a historical leftover; confirm before
# removing.
_complex_types_map = {single : csingle,
                      double : cdouble,
                      csingle : csingle,
                      cdouble : cdouble}
def _commonType(*arrays):
    """Return (computation type, result type) for a set of arrays.

    The computation type is always double or cdouble (the "lite" LAPACK
    wrappers only support those); the result type preserves the inputs'
    precision (single vs double, real vs complex).
    """
    # in lite version, use higher precision (always double or cdouble)
    result_type = single
    is_complex = False
    for a in arrays:
        if issubclass(a.dtype.type, inexact):
            if isComplexType(a.dtype.type):
                is_complex = True
            rt = _realType(a.dtype.type, default=None)
            if rt is None:
                # unsupported inexact scalar
                raise TypeError("array type %s is unsupported in linalg" %
                                (a.dtype.name,))
        else:
            # Integer/bool inputs are promoted to double.
            rt = double
        if rt is double:
            result_type = double
    if is_complex:
        t = cdouble
        result_type = _complex_types_map[result_type]
    else:
        t = double
    return t, result_type
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose

def _to_native_byte_order(*arrays):
    # Return the arrays converted to the platform's native byte order;
    # a single array is returned bare instead of wrapped in a list.
    ret = []
    for arr in arrays:
        if arr.dtype.byteorder not in ('=', '|'):
            ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
        else:
            ret.append(arr)
    if len(ret) == 1:
        return ret[0]
    else:
        return ret

def _fastCopyAndTranspose(type, *arrays):
    # Copy-and-transpose each 2-D array, casting to `type` first when needed;
    # a single array is returned bare instead of wrapped in a tuple.
    cast_arrays = ()
    for a in arrays:
        if a.dtype.type is type:
            cast_arrays = cast_arrays + (_fastCT(a),)
        else:
            cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
    if len(cast_arrays) == 1:
        return cast_arrays[0]
    else:
        return cast_arrays
def _assertRank2(*arrays):
    # Require every array to be exactly 2-D.
    for a in arrays:
        if a.ndim != 2:
            raise LinAlgError('%d-dimensional array given. Array must be '
                              'two-dimensional' % a.ndim)

def _assertRankAtLeast2(*arrays):
    # Require every array to be at least 2-D (allows stacked matrices).
    for a in arrays:
        if a.ndim < 2:
            raise LinAlgError('%d-dimensional array given. Array must be '
                              'at least two-dimensional' % a.ndim)

def _assertSquareness(*arrays):
    # Legacy 2-D squareness check comparing extreme dimensions.
    for a in arrays:
        if max(a.shape) != min(a.shape):
            raise LinAlgError('Array must be square')

def _assertNdSquareness(*arrays):
    # Require the trailing two dimensions of each (stack of) matrix be square.
    for a in arrays:
        m, n = a.shape[-2:]
        if m != n:
            raise LinAlgError('Last 2 dimensions of the array must be square')

def _assertFinite(*arrays):
    # Reject arrays containing inf or NaN entries.
    for a in arrays:
        if not (isfinite(a).all()):
            raise LinAlgError("Array must not contain infs or NaNs")

def _isEmpty2d(arr):
    # check size first for efficiency
    return arr.size == 0 and product(arr.shape[-2:]) == 0

def _assertNoEmpty2d(*arrays):
    # Reject matrices whose last-two-dims block is empty.
    for a in arrays:
        if _isEmpty2d(a):
            raise LinAlgError("Arrays cannot be empty")
def transpose(a):
    """
    Transpose each matrix in a stack of matrices.

    Unlike np.transpose, this swaps only the last two axes, leaving any
    leading (stack) axes untouched.

    Parameters
    ----------
    a : (..., M, N) array_like

    Returns
    -------
    aT : (..., N, M) ndarray
    """
    return swapaxes(a, -1, -2)
# Linear equations
def tensorsolve(a, b, axes=None):
    """
    Solve the tensor equation ``a x = b`` for x.

    All indices of `x` are summed over in the product, together with the
    rightmost indices of `a`, as in ``tensordot(a, x, axes=b.ndim)``.

    Parameters
    ----------
    a : array_like
        Coefficient tensor, of shape ``b.shape + Q`` where
        ``prod(Q) == prod(b.shape)`` (i.e. `a` is 'square').
    b : array_like
        Right-hand tensor, which can be of any shape.
    axes : tuple of ints, optional
        Axes in `a` to reorder to the right, before inversion.
        If None (default), no reordering is done.

    Returns
    -------
    x : ndarray, shape Q

    Raises
    ------
    LinAlgError
        If `a` is singular or not 'square' (in the above sense).
    """
    a, wrap = _makearray(a)
    b = asarray(b)
    ndim_a = a.ndim

    if axes is not None:
        # Move the requested axes to the end, keeping their given order.
        move = list(range(0, ndim_a))
        for ax in axes:
            move.remove(ax)
            move.insert(ndim_a, ax)
        a = a.transpose(move)

    # The trailing dims of `a` (beyond b's shape) form the solution shape Q.
    sol_shape = a.shape[-(ndim_a - b.ndim):]
    flat = 1
    for dim in sol_shape:
        flat *= dim

    a = a.reshape(-1, flat)
    b = b.ravel()
    res = wrap(solve(a, b))
    res.shape = sol_shape
    return res
def solve(a, b):
    """
    Solve a linear matrix equation, or system of linear scalar equations.

    Computes the "exact" solution, `x`, of the well-determined, i.e., full
    rank, linear matrix equation ``a x = b``.

    Parameters
    ----------
    a : (..., M, M) array_like
        Coefficient matrix.
    b : {(..., M,), (..., M, K)}, array_like
        Ordinate or "dependent variable" values.

    Returns
    -------
    x : {(..., M,), (..., M, K)} ndarray
        Solution to the system a x = b.  Returned shape is identical to `b`.

    Raises
    ------
    LinAlgError
        If `a` is singular or not square.

    Notes
    -----
    Broadcasting rules apply; the solutions are computed with LAPACK's _gesv.
    """
    a, _ = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    b, wrap = _makearray(b)
    t, result_t = _commonType(a, b)

    # Treat b as a stack of vectors only when its number of extra
    # dimensions matches a's exactly; otherwise it is a stack of matrices.
    if b.ndim == a.ndim - 1:
        gufunc = _umath_linalg.solve1
    else:
        gufunc = _umath_linalg.solve

    signature = 'DD->D' if isComplexType(t) else 'dd->d'
    extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
    solution = gufunc(a, b, signature=signature, extobj=extobj)

    return wrap(solution.astype(result_t, copy=False))
def tensorinv(a, ind=2):
    """
    Compute the 'inverse' of an N-dimensional array.

    The result is an inverse for `a` relative to the tensordot operation
    ``tensordot(a, b, ind)``: up to floating-point accuracy,
    ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor.

    Parameters
    ----------
    a : array_like
        Tensor to 'invert'.  Its shape must be 'square', i.e.
        ``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
    ind : int, optional
        Number of first indices that are involved in the inverse sum.
        Must be a positive integer, default is 2.

    Returns
    -------
    b : ndarray
        `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.

    Raises
    ------
    LinAlgError
        If `a` is singular or not 'square' (in the above sense).
    """
    a = asarray(a)
    oldshape = a.shape
    if ind <= 0:
        raise ValueError("Invalid ind argument.")
    # The trailing dims become the leading axes of the inverse.
    invshape = oldshape[ind:] + oldshape[:ind]
    flat = 1
    for dim in oldshape[ind:]:
        flat *= dim
    a = a.reshape(flat, -1)
    ia = inv(a)
    return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
    """
    Compute the (multiplicative) inverse of a matrix.

    Given a square matrix `a`, return the matrix `ainv` satisfying
    ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.

    Parameters
    ----------
    a : (..., M, M) array_like
        Matrix to be inverted.

    Returns
    -------
    ainv : (..., M, M) ndarray or matrix
        (Multiplicative) inverse of the matrix `a`.

    Raises
    ------
    LinAlgError
        If `a` is not square or inversion fails.

    Notes
    -----
    Broadcasting rules apply; stacks of matrices are inverted element-wise,
    and matrix inputs yield matrix outputs.
    """
    a, wrap = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)

    extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
    signature = 'D->D' if isComplexType(t) else 'd->d'
    inverse = _umath_linalg.inv(a, signature=signature, extobj=extobj)
    return wrap(inverse.astype(result_t, copy=False))
def matrix_power(a, n):
    """
    Raise a square matrix to the (integer) power `n`.

    For positive integers `n`, the power is computed by repeated matrix
    squarings and matrix multiplications. If ``n == 0``, the identity matrix
    of the same shape as M is returned. If ``n < 0``, the inverse
    is computed and then raised to the ``abs(n)``.

    .. note:: Stacks of object matrices are not currently supported.

    Parameters
    ----------
    a : (..., M, M) array_like
        Matrix to be "powered."
    n : int
        The exponent can be any integer or long integer, positive,
        negative, or zero.

    Returns
    -------
    a**n : (..., M, M) ndarray or matrix object
        The return value is the same shape and type as `M`;
        if the exponent is positive or zero then the type of the
        elements is the same as those of `M`. If the exponent is
        negative the elements are floating-point.

    Raises
    ------
    LinAlgError
        For matrices that are not square or that (for negative powers) cannot
        be inverted numerically.
    """
    a = asanyarray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)

    try:
        n = operator.index(n)
    except TypeError:
        raise TypeError("exponent must be an integer")

    # matmul (einsum-based here) cannot handle object arrays: fall back on
    # dot for 2-D object matrices and refuse stacks of them.
    if a.dtype != object:
        fmatmul = matmul
    elif a.ndim == 2:
        fmatmul = dot
    else:
        raise NotImplementedError(
            "matrix_power not supported for stacks of object arrays")

    if n == 0:
        out = empty_like(a)
        out[...] = eye(a.shape[-2], dtype=a.dtype)
        return out
    if n < 0:
        a = inv(a)
        n = abs(n)

    # Short-cuts for the small exponents.
    if n == 1:
        return a
    if n == 2:
        return fmatmul(a, a)
    if n == 3:
        return fmatmul(fmatmul(a, a), a)

    # Binary decomposition, LSB first: square `power` for each bit of n and
    # fold it into `result` whenever the bit is set.
    power = None
    result = None
    while n > 0:
        power = a if power is None else fmatmul(power, power)
        n, bit = divmod(n, 2)
        if bit:
            result = power if result is None else fmatmul(result, power)
    return result
# Cholesky decomposition
def cholesky(a):
    """
    Compute the Cholesky decomposition of a matrix.
    Return the lower-triangular factor `L` of the factorization
    ``a = L * L.H``, where ``.H`` denotes the conjugate transpose
    (the ordinary transpose when `a` is real-valued). The input must be
    Hermitian (symmetric if real-valued) and positive-definite; only `L`
    is returned.
    Parameters
    ----------
    a : (..., M, M) array_like
        Hermitian (symmetric if all elements are real), positive-definite
        input matrix.
    Returns
    -------
    L : (..., M, M) array_like
        Lower-triangular Cholesky factor of `a`. Returns a matrix object
        if `a` is a matrix object.
    Raises
    ------
    LinAlgError
        If the decomposition fails, for example, if `a` is not
        positive-definite.
    Notes
    -----
    .. versionadded:: 1.8.0
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.
    The Cholesky decomposition is often used as a fast way of solving
    ``A x = b`` when `A` is Hermitian/symmetric and positive-definite:
    first solve ``L y = b`` for ``y``, then ``L.H x = y`` for ``x``.
    Examples
    --------
    >>> A = np.array([[1,-2j],[2j,5]])
    >>> L = np.linalg.cholesky(A)
    >>> L
    array([[ 1.+0.j,  0.+0.j],
           [ 0.+2.j,  1.+0.j]])
    >>> np.dot(L, L.T.conj())  # verify that L * L.H = A
    array([[ 1.+0.j,  0.-2.j],
           [ 0.+2.j,  5.+0.j]])
    """
    # Raise LinAlgError (instead of producing invalid values) when the
    # matrix turns out not to be positive-definite.
    extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
    arr, wrap = _makearray(a)
    _assertRankAtLeast2(arr)
    _assertNdSquareness(arr)
    t, result_t = _commonType(arr)
    sig = 'D->D' if isComplexType(t) else 'd->d'
    # Lower-triangular variant of the gufunc; only L is ever computed.
    factor = _umath_linalg.cholesky_lo(arr, signature=sig, extobj=extobj)
    return wrap(factor.astype(result_t, copy=False))
# QR decomposition
def qr(a, mode='reduced'):
    """
    Compute the qr factorization of a matrix.
    Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
    upper-triangular.
    Parameters
    ----------
    a : array_like, shape (M, N)
        Matrix to be factored.
    mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
        If K = min(M, N), then
        * 'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
        * 'complete' : returns q, r with dimensions (M, M), (M, N)
        * 'r' : returns r only with dimensions (K, N)
        * 'raw' : returns h, tau with dimensions (N, M), (K,)
        * 'full' : alias of 'reduced', deprecated
        * 'economic' : returns h from 'raw', deprecated.
        The options 'reduced', 'complete, and 'raw' are new in numpy 1.8,
        see the notes for more information. The default is 'reduced', and to
        maintain backward compatibility with earlier versions of numpy both
        it and the old default 'full' can be omitted. Note that array h
        returned in 'raw' mode is transposed for calling Fortran. The
        'economic' mode is deprecated. The modes 'full' and 'economic' may
        be passed using only the first letter for backwards compatibility,
        but all others must be spelled out. See the Notes for more
        explanation.
    Returns
    -------
    q : ndarray of float or complex, optional
        A matrix with orthonormal columns. When mode = 'complete' the
        result is an orthogonal/unitary matrix depending on whether or not
        a is real/complex. The determinant may be either +/- 1 in that
        case.
    r : ndarray of float or complex, optional
        The upper-triangular matrix.
    (h, tau) : ndarrays of np.double or np.cdouble, optional
        The array h contains the Householder reflectors that generate q
        along with r. The tau array contains scaling factors for the
        reflectors. In the deprecated 'economic' mode only h is returned.
    Raises
    ------
    LinAlgError
        If factoring fails.
    Notes
    -----
    This is an interface to the LAPACK routines dgeqrf, zgeqrf,
    dorgqr, and zungqr.
    For more information on the qr factorization, see for example:
    http://en.wikipedia.org/wiki/QR_factorization
    Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
    `a` is of type `matrix`, all the return values will be matrices too.
    New 'reduced', 'complete', and 'raw' options for mode were added in
    NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
    addition the options 'full' and 'economic' were deprecated. Because
    'full' was the previous default and 'reduced' is the new default,
    backward compatibility can be maintained by letting `mode` default.
    The 'raw' option was added so that LAPACK routines that can multiply
    arrays by q using the Householder reflectors can be used. Note that in
    this case the returned arrays are of type np.double or np.cdouble and
    the h array is transposed to be FORTRAN compatible. No routines using
    the 'raw' return are currently exposed by numpy, but some are available
    in lapack_lite and just await the necessary work.
    Examples
    --------
    >>> a = np.random.randn(9, 6)
    >>> q, r = np.linalg.qr(a)
    >>> np.allclose(a, np.dot(q, r)) # a does equal qr
    True
    >>> r2 = np.linalg.qr(a, mode='r')
    >>> r3 = np.linalg.qr(a, mode='economic')
    >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
    True
    >>> # But only triu parts are guaranteed equal when mode='economic'
    >>> np.allclose(r, np.triu(r3[:6,:6], k=0))
    True
    Example illustrating a common use of `qr`: solving of least squares
    problems
    What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
    the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
    and you'll see that it should be y0 = 0, m = 1.) The answer is provided
    by solving the over-determined matrix equation ``Ax = b``, where::
        A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
        x = array([[y0], [m]])
        b = array([[1], [0], [2], [1]])
    If A = qr such that q is orthonormal (which is always possible via
    Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
    however, we simply use `lstsq`.)
    >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
    >>> A
    array([[0, 1],
           [1, 1],
           [1, 1],
           [2, 1]])
    >>> b = np.array([1, 0, 2, 1])
    >>> q, r = LA.qr(A)
    >>> p = np.dot(q.T, b)
    >>> np.dot(LA.inv(r), p)
    array([  1.1e-16,   1.0e+00])
    """
    # Normalize the requested mode, accepting the deprecated 'full'/'f'
    # and 'economic'/'e' spellings for backward compatibility.
    if mode not in ('reduced', 'complete', 'r', 'raw'):
        if mode in ('f', 'full'):
            # 2013-04-01, 1.8
            msg = "".join((
                "The 'full' option is deprecated in favor of 'reduced'.\n",
                "For backward compatibility let mode default."))
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            mode = 'reduced'
        elif mode in ('e', 'economic'):
            # 2013-04-01, 1.8
            msg = "The 'economic' option is deprecated."
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            mode = 'economic'
        else:
            raise ValueError("Unrecognized mode '%s'" % mode)
    a, wrap = _makearray(a)
    _assertRank2(a)
    _assertNoEmpty2d(a)
    m, n = a.shape
    t, result_t = _commonType(a)
    # LAPACK expects Fortran (column-major) order; from here on `a` holds
    # the transposed copy and is updated in place by the LAPACK calls.
    a = _fastCopyAndTranspose(t, a)
    a = _to_native_byte_order(a)
    mn = min(m, n)
    tau = zeros((mn,), t)
    if isComplexType(t):
        lapack_routine = lapack_lite.zgeqrf
        routine_name = 'zgeqrf'
    else:
        lapack_routine = lapack_lite.dgeqrf
        routine_name = 'dgeqrf'
    # calculate optimal size of work data 'work' (lwork=-1 is the LAPACK
    # workspace-query convention; the optimal size comes back in work[0])
    lwork = 1
    work = zeros((lwork,), t)
    results = lapack_routine(m, n, a, m, tau, work, -1, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))
    # do qr decomposition
    lwork = int(abs(work[0]))
    work = zeros((lwork,), t)
    results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))
    # handle modes that don't return q
    if mode == 'r':
        r = _fastCopyAndTranspose(result_t, a[:, :mn])
        return wrap(triu(r))
    if mode == 'raw':
        # Per the documented 'raw' contract: unwrapped, still transposed
        # (Fortran layout), so h can be fed back into LAPACK routines.
        return a, tau
    if mode == 'economic':
        if t != result_t :
            a = a.astype(result_t, copy=False)
        return wrap(a.T)
    # generate q from a
    if mode == 'complete' and m > n:
        mc = m
        q = empty((m, m), t)
    else:
        mc = mn
        q = empty((n, m), t)
    # q is built in the transposed (Fortran) layout and transposed back below.
    q[:n] = a
    if isComplexType(t):
        lapack_routine = lapack_lite.zungqr
        routine_name = 'zungqr'
    else:
        lapack_routine = lapack_lite.dorgqr
        routine_name = 'dorgqr'
    # determine optimal lwork (workspace query again, lwork=-1)
    lwork = 1
    work = zeros((lwork,), t)
    results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))
    # compute q
    lwork = int(abs(work[0]))
    work = zeros((lwork,), t)
    results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))
    q = _fastCopyAndTranspose(result_t, q[:mc])
    r = _fastCopyAndTranspose(result_t, a[:, :mc])
    return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
    """
    Compute the eigenvalues of a general matrix.
    Main difference between `eigvals` and `eig`: the eigenvectors aren't
    returned.
    Parameters
    ----------
    a : (..., M, M) array_like
        A complex- or real-valued matrix whose eigenvalues will be computed.
    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues, each repeated according to its multiplicity.
        They are not necessarily ordered, nor are they necessarily
        real for real matrices.
    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.
    See Also
    --------
    eig : eigenvalues and right eigenvectors of general arrays
    eigvalsh : eigenvalues of symmetric or Hermitian arrays.
    eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
    Notes
    -----
    .. versionadded:: 1.8.0
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.
    This is implemented using the _geev LAPACK routines which compute
    the eigenvalues and eigenvectors of general square arrays.
    Examples
    --------
    >>> from numpy import linalg as LA
    >>> D = np.diag((-1, 1))
    >>> LA.eigvals(D)
    array([-1.,  1.])
    """
    # The result is a plain ndarray of eigenvalues and is never wrapped
    # back to the input's subclass, so the wrap callable that _makearray
    # would return is unnecessary; a bare asarray conversion suffices.
    a = asarray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    _assertFinite(a)
    t, result_t = _commonType(a)
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    # The gufunc always produces complex eigenvalues ('->D').
    signature = 'D->D' if isComplexType(t) else 'd->D'
    w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
    if not isComplexType(t):
        # Real input: downcast to a real result only if every eigenvalue
        # has a zero imaginary part.
        if all(w.imag == 0):
            w = w.real
            result_t = _realType(result_t)
        else:
            result_t = _complexType(result_t)
    return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
    """
    Compute the eigenvalues of a Hermitian or real symmetric matrix.
    Main difference from eigh: the eigenvectors are not computed.
    Parameters
    ----------
    a : (..., M, M) array_like
        A complex- or real-valued matrix whose eigenvalues are to be
        computed.
    UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
        Irrespective of this value only the real parts of the diagonal will
        be considered in the computation to preserve the notion of a Hermitian
        matrix. It therefore follows that the imaginary part of the diagonal
        will always be treated as zero.
    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.
    Raises
    ------
    ValueError
        If `UPLO` is neither 'L' nor 'U'.
    LinAlgError
        If the eigenvalue computation does not converge.
    See Also
    --------
    eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
    eigvals : eigenvalues of general real or complex arrays.
    eig : eigenvalues and right eigenvectors of general real or complex
          arrays.
    Notes
    -----
    .. versionadded:: 1.8.0
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.
    The eigenvalues are computed using LAPACK routines _syevd, _heevd
    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, -2j], [2j, 5]])
    >>> LA.eigvalsh(a)
    array([ 0.17157288,  5.82842712])
    """
    UPLO = UPLO.upper()
    if UPLO not in ('L', 'U'):
        raise ValueError("UPLO argument must be 'L' or 'U'")
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    # Pick the gufunc variant matching the triangle that holds the data.
    if UPLO == 'L':
        gufunc = _umath_linalg.eigvalsh_lo
    else:
        gufunc = _umath_linalg.eigvalsh_up
    # The result is a plain ndarray of eigenvalues and is never wrapped
    # back to the input's subclass, so the wrap callable that _makearray
    # would return is unnecessary; a bare asarray conversion suffices.
    a = asarray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)
    # Eigenvalues of a Hermitian matrix are always real ('->d').
    signature = 'D->d' if isComplexType(t) else 'd->d'
    w = gufunc(a, signature=signature, extobj=extobj)
    return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
    # Promote `a` to a LAPACK-compatible common dtype and lay the data out
    # via the fast copy-and-transpose helper; returns the converted array
    # together with the working dtype and the dtype for results.
    common, result_t = _commonType(a)
    converted = _fastCT(a.astype(common))
    return converted, common, result_t
# Eigenvectors
def eig(a):
    """
    Compute the eigenvalues and right eigenvectors of a square array.
    Parameters
    ----------
    a : (..., M, M) array
        Matrices for which the eigenvalues and right eigenvectors will
        be computed
    Returns
    -------
    w : (..., M) array
        The eigenvalues, each repeated according to its multiplicity.
        The eigenvalues are not necessarily ordered. The resulting
        array will be of complex type, unless the imaginary part is
        zero in which case it will be cast to a real type. When `a`
        is real the resulting eigenvalues will be real (0 imaginary
        part) or occur in conjugate pairs
    v : (..., M, M) array
        The normalized (unit "length") eigenvectors, such that the
        column ``v[:,i]`` is the eigenvector corresponding to the
        eigenvalue ``w[i]``.
    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.
    See Also
    --------
    eigvals : eigenvalues of a non-symmetric array.
    eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
           (conjugate symmetric) array.
    eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
               array.
    Notes
    -----
    .. versionadded:: 1.8.0
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.
    This is implemented using the _geev LAPACK routines which compute
    the eigenvalues and eigenvectors of general square arrays.
    The number `w` is an eigenvalue of `a` if there exists a vector `v`
    such that ``dot(a,v) = w * v``. The array `v` of eigenvectors may
    not be of maximum rank; if the eigenvalues are all different, then
    theoretically the eigenvectors are linearly independent. `v` holds
    the *right* eigenvectors; left and right eigenvectors are not, in
    general, transposes of each other.
    References
    ----------
    G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
    Academic Press, Inc., 1980, Various pp.
    Examples
    --------
    >>> from numpy import linalg as LA
    >>> w, v = LA.eig(np.diag((1, 2, 3)))
    >>> w; v
    array([ 1.,  2.,  3.])
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])
    """
    arr, wrap = _makearray(a)
    _assertRankAtLeast2(arr)
    _assertNdSquareness(arr)
    _assertFinite(arr)
    t, result_t = _commonType(arr)
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    # The gufunc always yields complex eigenvalues and eigenvectors.
    sig = 'D->DD' if isComplexType(t) else 'd->DD'
    w, vt = _umath_linalg.eig(arr, signature=sig, extobj=extobj)
    real_input = not isComplexType(t)
    if real_input and all(w.imag == 0.0):
        # Every eigenvalue is real: downcast both outputs to real dtype.
        w, vt = w.real, vt.real
        result_t = _realType(result_t)
    else:
        result_t = _complexType(result_t)
    return (w.astype(result_t, copy=False),
            wrap(vt.astype(result_t, copy=False)))
def eigh(a, UPLO='L'):
    """
    Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
    Returns two objects, a 1-D array containing the eigenvalues of `a`, and
    a 2-D square array or matrix (depending on the input type) of the
    corresponding eigenvectors (in columns).
    Parameters
    ----------
    a : (..., M, M) array
        Hermitian/Symmetric matrices whose eigenvalues and
        eigenvectors are to be computed.
    UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
        Irrespective of this value only the real parts of the diagonal will
        be considered in the computation to preserve the notion of a Hermitian
        matrix. It therefore follows that the imaginary part of the diagonal
        will always be treated as zero.
    Returns
    -------
    w : (..., M) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.
    v : {(..., M, M) ndarray, (..., M, M) matrix}
        The column ``v[:, i]`` is the normalized eigenvector corresponding
        to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
        a matrix object.
    Raises
    ------
    ValueError
        If `UPLO` is neither 'L' nor 'U'.
    LinAlgError
        If the eigenvalue computation does not converge.
    See Also
    --------
    eigvalsh : eigenvalues of symmetric or Hermitian arrays.
    eig : eigenvalues and right eigenvectors for non-symmetric arrays.
    eigvals : eigenvalues of non-symmetric arrays.
    Notes
    -----
    .. versionadded:: 1.8.0
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.
    The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
    _heevd
    The eigenvalues of real symmetric or complex Hermitian matrices are
    always real. [1]_ The array `v` of (column) eigenvectors is unitary
    and `a`, `w`, and `v` satisfy the equations
    ``dot(a, v[:, i]) = w[i] * v[:, i]``.
    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pg. 222.
    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, -2j], [2j, 5]])
    >>> w, v = LA.eigh(a)
    >>> w; v
    array([ 0.17157288,  5.82842712])
    array([[-0.92387953+0.j        , -0.38268343+0.j        ],
           [ 0.00000000+0.38268343j,  0.00000000-0.92387953j]])
    """
    UPLO = UPLO.upper()
    if UPLO not in ('L', 'U'):
        raise ValueError("UPLO argument must be 'L' or 'U'")
    arr, wrap = _makearray(a)
    _assertRankAtLeast2(arr)
    _assertNdSquareness(arr)
    t, result_t = _commonType(arr)
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    # Select the gufunc variant matching the triangle holding the data.
    gufunc = (_umath_linalg.eigh_lo if UPLO == 'L'
              else _umath_linalg.eigh_up)
    # Eigenvalues of a Hermitian matrix are real ('->d...') even for
    # complex input; eigenvectors keep the input's complexity.
    sig = 'D->dD' if isComplexType(t) else 'd->dd'
    w, vt = gufunc(arr, signature=sig, extobj=extobj)
    eigenvalues = w.astype(_realType(result_t), copy=False)
    eigenvectors = vt.astype(result_t, copy=False)
    return eigenvalues, wrap(eigenvectors)
# Singular value decomposition
def svd(a, full_matrices=True, compute_uv=True):
    """
    Singular Value Decomposition.
    When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh
    = (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is a 1D
    array of `a`'s singular values. When `a` is higher-dimensional, SVD is
    applied in stacked mode as explained below.
    Parameters
    ----------
    a : (..., M, N) array_like
        A real or complex array with ``a.ndim >= 2``.
    full_matrices : bool, optional
        If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and
        ``(..., N, N)``, respectively. Otherwise, the shapes are
        ``(..., M, K)`` and ``(..., K, N)``, respectively, where
        ``K = min(M, N)``.
    compute_uv : bool, optional
        Whether or not to compute `u` and `vh` in addition to `s`. True
        by default.
    Returns
    -------
    u : { (..., M, M), (..., M, K) } array
        Unitary array(s). Only returned when `compute_uv` is True.
    s : (..., K) array
        Vector(s) with the singular values, within each vector sorted in
        descending order.
    vh : { (..., N, N), (..., K, N) } array
        Unitary array(s). Only returned when `compute_uv` is True.
    Raises
    ------
    LinAlgError
        If SVD computation does not converge.
    Notes
    -----
    .. versionchanged:: 1.8.0
       Broadcasting rules apply, see the `numpy.linalg` documentation for
       details.
    The decomposition is performed using LAPACK routine ``_gesdd``.
    In the 2D case, SVD is written as :math:`A = U S V^H`. The rows of
    `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are
    the eigenvectors of :math:`A A^H`; the corresponding (possibly
    non-zero) eigenvalues are given by ``s**2``. For higher-dimensional
    input, SVD is applied to the last two indices of every stacked
    matrix, and `a` can be reconstructed with
    ``(u * s[..., None, :]) @ vh`` or ``u @ (s[..., None] * vh)``.
    If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so
    are all the return values.
    Examples
    --------
    >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
    >>> u, s, vh = np.linalg.svd(a, full_matrices=False)
    >>> u.shape, s.shape, vh.shape
    ((9, 6), (6,), (6, 6))
    >>> np.allclose(a, np.dot(u * s, vh))
    True
    """
    arr, wrap = _makearray(a)
    _assertNoEmpty2d(arr)
    _assertRankAtLeast2(arr)
    t, result_t = _commonType(arr)
    extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
    m, n = arr.shape[-2:]
    # The gufunc variants are specialized on whether the matrix is wide
    # (m < n) or tall/square, and on the requested output shapes.
    wide = m < n
    if compute_uv:
        if full_matrices:
            gufunc = _umath_linalg.svd_m_f if wide else _umath_linalg.svd_n_f
        else:
            gufunc = _umath_linalg.svd_m_s if wide else _umath_linalg.svd_n_s
        sig = 'D->DdD' if isComplexType(t) else 'd->ddd'
        u, s, vh = gufunc(arr, signature=sig, extobj=extobj)
        # Singular values are always real, even for complex input.
        return (wrap(u.astype(result_t, copy=False)),
                s.astype(_realType(result_t), copy=False),
                wrap(vh.astype(result_t, copy=False)))
    # Singular values only; the result is never wrapped.
    gufunc = _umath_linalg.svd_m if wide else _umath_linalg.svd_n
    sig = 'D->d' if isComplexType(t) else 'd->d'
    s = gufunc(arr, signature=sig, extobj=extobj)
    return s.astype(_realType(result_t), copy=False)
def cond(x, p=None):
    """
    Compute the condition number of a matrix.
    This function is capable of returning the condition number using
    one of seven different norms, depending on the value of `p` (see
    Parameters below).
    Parameters
    ----------
    x : (..., M, N) array_like
        The matrix whose condition number is sought.
    p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
        Order of the norm:
        =====  ============================
        p      norm for matrices
        =====  ============================
        None   2-norm, computed directly using the ``SVD``
        'fro'  Frobenius norm
        inf    max(sum(abs(x), axis=1))
        -inf   min(sum(abs(x), axis=1))
        1      max(sum(abs(x), axis=0))
        -1     min(sum(abs(x), axis=0))
        2      2-norm (largest sing. value)
        -2     smallest singular value
        =====  ============================
        inf means the numpy.inf object, and the Frobenius norm is
        the root-of-sum-of-squares norm.
    Returns
    -------
    c : {float, inf}
        The condition number of the matrix. May be infinite.
    See Also
    --------
    numpy.linalg.norm
    Notes
    -----
    The condition number of `x` is defined as the norm of `x` times the
    norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
    (root-of-sum-of-squares) or one of a number of other matrix norms.
    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
           Academic Press, Inc., 1980, pg. 285.
    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
    >>> LA.cond(a)
    1.4142135623730951
    >>> LA.cond(a, 'fro')
    3.1622776601683795
    >>> LA.cond(a, np.inf)
    2.0
    >>> LA.cond(a, -np.inf)
    1.0
    >>> LA.cond(a, 1)
    2.0
    >>> LA.cond(a, -1)
    1.0
    >>> LA.cond(a, 2)
    1.4142135623730951
    >>> LA.cond(a, -2)
    0.70710678118654746
    >>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
    0.70710678118654746
    """
    x = asarray(x)  # in case we have a matrix
    if p is None or p == 2 or p == -2:
        # 2-norm condition numbers come directly from the ratio of the
        # extreme singular values; no explicit inverse is needed.
        s = svd(x, compute_uv=False)
        with errstate(all='ignore'):
            if p == -2:
                r = s[..., -1] / s[..., 0]
            else:
                r = s[..., 0] / s[..., -1]
    else:
        # Call inv(x) ignoring errors. The result array will
        # contain nans in the entries where inversion failed.
        _assertRankAtLeast2(x)
        _assertNdSquareness(x)
        t, result_t = _commonType(x)
        signature = 'D->D' if isComplexType(t) else 'd->d'
        with errstate(all='ignore'):
            invx = _umath_linalg.inv(x, signature=signature)
            r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1))
        r = r.astype(result_t, copy=False)
    # Convert nans to infs unless the original array had nan entries
    r = asarray(r)
    nan_mask = isnan(r)
    if nan_mask.any():
        # Only report inf where the nan came from a failed inversion or a
        # zero singular value, not from nans already present in the input.
        nan_mask &= ~isnan(x).any(axis=(-2, -1))
        if r.ndim > 0:
            r[nan_mask] = Inf
        elif nan_mask:
            # 0-d result: assign through the empty-tuple index.
            r[()] = Inf
    # Convention is to return scalars instead of 0d arrays
    if r.ndim == 0:
        r = r[()]
    return r
def matrix_rank(M, tol=None, hermitian=False):
    """
    Return matrix rank of array using SVD method
    Rank of the array is the number of singular values of the array that are
    greater than `tol`.
    .. versionchanged:: 1.14
       Can now operate on stacks of matrices
    Parameters
    ----------
    M : {(M,), (..., M, N)} array_like
        input vector or stack of matrices
    tol : (...) array_like, float, optional
        threshold below which SVD values are considered zero. If `tol` is
        None, and ``S`` is an array with singular values for `M`, and
        ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
        set to ``S.max() * max(M.shape) * eps``.
        .. versionchanged:: 1.14
           Broadcasted against the stack of matrices
    hermitian : bool, optional
        If True, `M` is assumed to be Hermitian (symmetric if real-valued),
        enabling a more efficient method for finding singular values.
        Defaults to False.
        .. versionadded:: 1.14
    Notes
    -----
    The default threshold ``S.max() * max(M.shape) * eps`` detects rank
    deficiency while accounting for the numerical errors of the SVD
    computation; it is the algorithm MATLAB uses [1] and also appears in
    *Numerical recipes* [2]. If you have more information about the
    sources of error in `M` (e.g. measurement uncertainty larger than
    floating point epsilon), pass a `tol` near that uncertainty instead.
    References
    ----------
    .. [1] MATLAB reference documention, "Rank"
           http://www.mathworks.com/help/techdoc/ref/rank.html
    .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
           "Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
           page 795.
    Examples
    --------
    >>> from numpy.linalg import matrix_rank
    >>> matrix_rank(np.eye(4)) # Full rank matrix
    4
    >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
    >>> matrix_rank(I)
    3
    >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
    1
    >>> matrix_rank(np.zeros((4,)))
    0
    """
    arr = asarray(M)
    if arr.ndim < 2:
        # A vector (or scalar) has rank 1 unless it is entirely zero.
        return int(not all(arr == 0))
    # For Hermitian input the absolute eigenvalues equal the singular
    # values and are cheaper to compute.
    sigma = abs(eigvalsh(arr)) if hermitian else svd(arr, compute_uv=False)
    if tol is None:
        # Per-matrix default threshold, broadcast over the stack.
        threshold = (sigma.max(axis=-1, keepdims=True)
                     * max(arr.shape[-2:]) * finfo(sigma.dtype).eps)
    else:
        # Add a trailing axis so a stack of tolerances broadcasts against
        # the trailing singular-value axis.
        threshold = asarray(tol)[..., newaxis]
    return count_nonzero(sigma > threshold, axis=-1)
# Generalized inverse
def pinv(a, rcond=1e-15):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    The pseudo-inverse is built from the singular value decomposition:
    singular values at or below ``rcond`` times the largest one are
    treated as exactly zero, the remaining ones are inverted, and the
    SVD factors are recombined.

    .. versionchanged:: 1.14
       Can now operate on stacks of matrices

    Parameters
    ----------
    a : (..., M, N) array_like
        Matrix or stack of matrices to be pseudo-inverted.
    rcond : (...) array_like of float
        Relative cutoff for small singular values; values smaller (in
        modulus) than ``rcond`` times the largest singular value are set
        to zero.  Broadcasts against the stack of matrices.

    Returns
    -------
    B : (..., N, M) ndarray
        The pseudo-inverse of `a`.  If `a` is a `matrix` instance, then
        so is `B`.

    Raises
    ------
    LinAlgError
        If the SVD computation does not converge.
    """
    a, wrap = _makearray(a)
    rcond = asarray(rcond)
    # The pseudo-inverse of an empty M x N matrix is an empty N x M one.
    if _isEmpty2d(a):
        rows, cols = a.shape[-2:]
        return wrap(empty(a.shape[:-2] + (cols, rows), dtype=a.dtype))
    u, s, vt = svd(a.conjugate(), full_matrices=False)
    # Invert only the singular values above the relative cutoff; the rest
    # are clamped to zero (effective rank truncation).
    cutoff = amax(s, axis=-1, keepdims=True) * rcond[..., newaxis]
    keep = s > cutoff
    s = divide(1, s, where=keep, out=s)
    s[~keep] = 0
    pinv_a = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
    return wrap(pinv_a)
# Determinant
def slogdet(a):
    """
    Compute the sign and (natural) logarithm of the determinant of an array.

    Where `det` may overflow or underflow for very large or very small
    determinants, this routine stays robust by returning the logarithm
    of the absolute determinant together with its sign, so that the
    determinant always equals ``sign * np.exp(logdet)``.

    Parameters
    ----------
    a : (..., M, M) array_like
        Input array, has to be a square 2-D array.

    Returns
    -------
    sign : (...) array_like
        Sign of the determinant: 1, 0 or -1 for real input; for complex
        input a complex number on the unit circle, or else 0.
    logdet : (...) array_like
        Natural log of the absolute value of the determinant.  When the
        determinant is zero, `sign` is 0 and `logdet` is -Inf.

    See Also
    --------
    det

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  The determinant is computed via LU factorization using the
    LAPACK routine z/dgetrf.
    """
    a = asarray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)
    real_t = _realType(result_t)
    # Complex input yields a complex sign but a real log-determinant.
    signature = 'D->Dd' if isComplexType(t) else 'd->dd'
    sign, logdet = _umath_linalg.slogdet(a, signature=signature)
    return (sign.astype(result_t, copy=False),
            logdet.astype(real_t, copy=False))
def det(a):
    """
    Compute the determinant of an array.

    Parameters
    ----------
    a : (..., M, M) array_like
        Input array to compute determinants for.

    Returns
    -------
    det : (...) array_like
        Determinant of `a`.

    See Also
    --------
    slogdet : Another way to represent the determinant, more suitable
        for large matrices where underflow/overflow may occur.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  The determinant is computed via LU factorization using the
    LAPACK routine z/dgetrf.
    """
    a = asarray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)
    # Dispatch to the complex or real gufunc loop as appropriate.
    signature = 'D->D' if isComplexType(t) else 'd->d'
    raw = _umath_linalg.det(a, signature=signature)
    return raw.astype(result_t, copy=False)
# Linear Least Squares
def lstsq(a, b, rcond="warn"):
    """
    Return the least-squares solution to a linear matrix equation.

    Solves ``a x = b`` by computing a vector `x` that minimizes the
    squared Euclidean 2-norm ``|| b - a x ||^2``.  The system may be
    under-, well-, or over-determined; for a square full-rank `a` the
    result is (up to round-off) the exact solution.

    Parameters
    ----------
    a : (M, N) array_like
        "Coefficient" matrix.
    b : {(M,), (M, K)} array_like
        Ordinate or "dependent variable" values.  For a 2-D `b` the
        least-squares solution is calculated for each of the K columns.
    rcond : float, optional
        Cut-off ratio for small singular values of `a`; for rank
        determination, singular values smaller than ``rcond`` times the
        largest singular value are treated as zero.

        .. versionchanged:: 1.14.0
           If not set, a FutureWarning is given.  The previous default of
           ``-1`` uses the machine precision; the new default uses the
           machine precision times ``max(M, N)``.  Pass ``rcond=None``
           for the new default, ``rcond=-1`` for the old behavior.

    Returns
    -------
    x : {(N,), (N, K)} ndarray
        Least-squares solution.
    residuals : {(1,), (K,), (0,)} ndarray
        Sums of squared residuals of ``b - a*x``; empty when the rank of
        `a` is < N or M <= N.
    rank : int
        Rank of matrix `a`.
    s : (min(M, N),) ndarray
        Singular values of `a`.

    Raises
    ------
    LinAlgError
        If computation does not converge.

    Notes
    -----
    If `b` is a matrix, then all array results are returned as matrices.
    """
    a, _ = _makearray(a)
    b, wrap = _makearray(b)
    # A 1-D right-hand side is temporarily promoted to a column vector.
    is_1d = b.ndim == 1
    if is_1d:
        b = b[:, newaxis]
    _assertRank2(a, b)
    _assertNoEmpty2d(a, b)  # TODO: relax this constraint
    m, n = a.shape[-2:]
    m2, n_rhs = b.shape[-2:]
    if m != m2:
        raise LinAlgError('Incompatible dimensions')
    t, result_t = _commonType(a, b)
    real_t = _linalgRealType(t)
    result_real_t = _realType(result_t)
    # Resolve the default rcond, warning about the upcoming change.
    if rcond == "warn":
        # 2017-08-19, 1.14.0
        warnings.warn("`rcond` parameter will change to the default of "
                      "machine precision times ``max(M, N)`` where M and N "
                      "are the input matrix dimensions.\n"
                      "To use the future default and silence this warning "
                      "we advise to pass `rcond=None`, to keep using the old, "
                      "explicitly pass `rcond=-1`.",
                      FutureWarning, stacklevel=2)
        rcond = -1
    if rcond is None:
        rcond = finfo(t).eps * max(n, m)
    # Two gufunc loops exist, chosen by the shape of the problem.
    gufunc = _umath_linalg.lstsq_m if m <= n else _umath_linalg.lstsq_n
    signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
    extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq)
    x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)
    # Undo the earlier promotion of a 1-D right-hand side.
    if is_1d:
        x = x.squeeze(axis=-1)
        # we probably should squeeze resids too, but we can't
        # without breaking compatibility
    # As documented: residuals only apply to full-rank over-determined fits.
    if rank != n or m <= n:
        resids = array([], result_real_t)
    # Coerce output arrays to the documented result types.
    s = s.astype(result_real_t, copy=False)
    resids = resids.astype(result_real_t, copy=False)
    x = x.astype(result_t, copy=True)  # Copying lets the memory in r_parts be freed
    return wrap(x), wrap(resids), rank, s
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
y = moveaxis(x, (row_axis, col_axis), (-2, -1))
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
    """
    Matrix or vector norm.
    This function is able to return one of eight different matrix norms,
    or one of an infinite number of vector norms (described below), depending
    on the value of the ``ord`` parameter.
    Parameters
    ----------
    x : array_like
        Input array. If `axis` is None, `x` must be 1-D or 2-D.
    ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
        Order of the norm (see table under ``Notes``). inf means numpy's
        `inf` object.
    axis : {int, 2-tuple of ints, None}, optional
        If `axis` is an integer, it specifies the axis of `x` along which to
        compute the vector norms. If `axis` is a 2-tuple, it specifies the
        axes that hold 2-D matrices, and the matrix norms of these matrices
        are computed. If `axis` is None then either a vector norm (when `x`
        is 1-D) or a matrix norm (when `x` is 2-D) is returned.
        .. versionadded:: 1.8.0
    keepdims : bool, optional
        If this is set to True, the axes which are normed over are left in the
        result as dimensions with size one. With this option the result will
        broadcast correctly against the original `x`.
        .. versionadded:: 1.10.0
    Returns
    -------
    n : float or ndarray
        Norm of the matrix or vector(s).
    Notes
    -----
    For values of ``ord <= 0``, the result is, strictly speaking, not a
    mathematical 'norm', but it may still be useful for various numerical
    purposes.
    The following norms can be calculated:
    =====  ============================  ==========================
    ord    norm for matrices             norm for vectors
    =====  ============================  ==========================
    None   Frobenius norm                2-norm
    'fro'  Frobenius norm                --
    'nuc'  nuclear norm                  --
    inf    max(sum(abs(x), axis=1))      max(abs(x))
    -inf   min(sum(abs(x), axis=1))      min(abs(x))
    0      --                            sum(x != 0)
    1      max(sum(abs(x), axis=0))      as below
    -1     min(sum(abs(x), axis=0))      as below
    2      2-norm (largest sing. value)  as below
    -2     smallest singular value       as below
    other  --                            sum(abs(x)**ord)**(1./ord)
    =====  ============================  ==========================
    The Frobenius norm is given by [1]_:
        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
    The nuclear norm is the sum of the singular values.
    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
           Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.arange(9) - 4
    >>> a
    array([-4, -3, -2, -1,  0,  1,  2,  3,  4])
    >>> b = a.reshape((3, 3))
    >>> b
    array([[-4, -3, -2],
           [-1,  0,  1],
           [ 2,  3,  4]])
    >>> LA.norm(a)
    7.745966692414834
    >>> LA.norm(b)
    7.745966692414834
    >>> LA.norm(b, 'fro')
    7.745966692414834
    >>> LA.norm(a, np.inf)
    4.0
    >>> LA.norm(b, np.inf)
    9.0
    >>> LA.norm(a, -np.inf)
    0.0
    >>> LA.norm(b, -np.inf)
    2.0
    >>> LA.norm(a, 1)
    20.0
    >>> LA.norm(b, 1)
    7.0
    >>> LA.norm(a, -1)
    -4.6566128774142013e-010
    >>> LA.norm(b, -1)
    6.0
    >>> LA.norm(a, 2)
    7.745966692414834
    >>> LA.norm(b, 2)
    7.3484692283495345
    >>> LA.norm(a, -2)
    nan
    >>> LA.norm(b, -2)
    1.8570331885190563e-016
    >>> LA.norm(a, 3)
    5.8480354764257312
    >>> LA.norm(a, -3)
    nan
    Using the `axis` argument to compute vector norms:
    >>> c = np.array([[ 1, 2, 3],
    ...               [-1, 1, 4]])
    >>> LA.norm(c, axis=0)
    array([ 1.41421356,  2.23606798,  5.        ])
    >>> LA.norm(c, axis=1)
    array([ 3.74165739,  4.24264069])
    >>> LA.norm(c, ord=1, axis=1)
    array([ 6.,  6.])
    Using the `axis` argument to compute matrix norms:
    >>> m = np.arange(8).reshape(2,2,2)
    >>> LA.norm(m, axis=(1,2))
    array([  3.74165739,  11.22497216])
    >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
    (3.7416573867739413, 11.224972160321824)
    """
    x = asarray(x)
    # Norms are floating-point quantities: promote integer/boolean input.
    if not issubclass(x.dtype.type, (inexact, object_)):
        x = x.astype(float)
    # Immediately handle some default, simple, fast, and common cases.
    if axis is None:
        ndim = x.ndim
        if ((ord is None) or
            (ord in ('f', 'fro') and ndim == 2) or
            (ord == 2 and ndim == 1)):
            # 2-norm / Frobenius norm of the flattened array via one dot
            # product; complex input needs both real and imaginary parts.
            x = x.ravel(order='K')
            if isComplexType(x.dtype.type):
                sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
            else:
                sqnorm = dot(x, x)
            ret = sqrt(sqnorm)
            if keepdims:
                ret = ret.reshape(ndim*[1])
            return ret
    # Normalize the `axis` argument to a tuple.
    nd = x.ndim
    if axis is None:
        axis = tuple(range(nd))
    elif not isinstance(axis, tuple):
        try:
            axis = int(axis)
        except Exception:
            raise TypeError("'axis' must be None, an integer or a tuple of integers")
        axis = (axis,)
    if len(axis) == 1:
        # Vector norms along a single axis.
        if ord == Inf:
            return abs(x).max(axis=axis, keepdims=keepdims)
        elif ord == -Inf:
            return abs(x).min(axis=axis, keepdims=keepdims)
        elif ord == 0:
            # Zero norm
            return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)
        elif ord == 1:
            # special case for speedup
            return add.reduce(abs(x), axis=axis, keepdims=keepdims)
        elif ord is None or ord == 2:
            # special case for speedup
            s = (x.conj() * x).real
            return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
        else:
            # General p-norm: ord must behave like a number.
            try:
                ord + 1
            except TypeError:
                raise ValueError("Invalid norm order for vectors.")
            absx = abs(x)
            absx **= ord
            ret = add.reduce(absx, axis=axis, keepdims=keepdims)
            ret **= (1 / ord)
            return ret
    elif len(axis) == 2:
        # Matrix norms over a pair of axes.
        row_axis, col_axis = axis
        row_axis = normalize_axis_index(row_axis, nd)
        col_axis = normalize_axis_index(col_axis, nd)
        if row_axis == col_axis:
            raise ValueError('Duplicate axes given.')
        if ord == 2:
            ret = _multi_svd_norm(x, row_axis, col_axis, amax)
        elif ord == -2:
            ret = _multi_svd_norm(x, row_axis, col_axis, amin)
        elif ord == 1:
            # Reducing over row_axis removes it, shifting a later col_axis.
            if col_axis > row_axis:
                col_axis -= 1
            ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
        elif ord == Inf:
            if row_axis > col_axis:
                row_axis -= 1
            ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
        elif ord == -1:
            if col_axis > row_axis:
                col_axis -= 1
            ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
        elif ord == -Inf:
            if row_axis > col_axis:
                row_axis -= 1
            ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
        elif ord in [None, 'fro', 'f']:
            ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
        elif ord == 'nuc':
            ret = _multi_svd_norm(x, row_axis, col_axis, sum)
        else:
            raise ValueError("Invalid norm order for matrices.")
        if keepdims:
            # Reinstate the reduced axes as size-1 dimensions.
            ret_shape = list(x.shape)
            ret_shape[axis[0]] = 1
            ret_shape[axis[1]] = 1
            ret = ret.reshape(ret_shape)
        return ret
    else:
        raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
    """
    Compute the dot product of two or more arrays in a single function call,
    while automatically selecting the fastest evaluation order.
    `multi_dot` chains `numpy.dot` and uses optimal parenthesization
    of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
    this can speed up the multiplication a lot.
    If the first argument is 1-D it is treated as a row vector.
    If the last argument is 1-D it is treated as a column vector.
    The other arguments must be 2-D.
    Think of `multi_dot` as::
        def multi_dot(arrays): return functools.reduce(np.dot, arrays)
    Parameters
    ----------
    arrays : sequence of array_like
        If the first argument is 1-D it is treated as row vector.
        If the last argument is 1-D it is treated as column vector.
        The other arguments must be 2-D.
    Returns
    -------
    output : ndarray
        Returns the dot product of the supplied arrays.
    See Also
    --------
    dot : dot multiplication with two arguments.
    References
    ----------
    .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
    .. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
    Examples
    --------
    `multi_dot` allows you to write::
    >>> from numpy.linalg import multi_dot
    >>> # Prepare some data
    >>> A = np.random.random((10000, 100))
    >>> B = np.random.random((100, 1000))
    >>> C = np.random.random((1000, 5))
    >>> D = np.random.random((5, 333))
    >>> # the actual dot multiplication
    >>> multi_dot([A, B, C, D])
    instead of::
    >>> np.dot(np.dot(np.dot(A, B), C), D)
    >>> # or
    >>> A.dot(B).dot(C).dot(D)
    Notes
    -----
    The cost for a matrix multiplication can be calculated with the
    following function::
        def cost(A, B):
            return A.shape[0] * A.shape[1] * B.shape[1]
    Let's assume we have three matrices
    :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
    The costs for the two different parenthesizations are as follows::
        cost((AB)C) = 10*100*5 + 10*5*50   = 5000 + 2500   = 7500
        cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
    """
    n = len(arrays)
    # optimization only makes sense for len(arrays) > 2
    if n < 2:
        raise ValueError("Expecting at least two arrays.")
    elif n == 2:
        return dot(arrays[0], arrays[1])
    arrays = [asanyarray(a) for a in arrays]
    # save original ndim to reshape the result array into the proper form later
    ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
    # Explicitly convert vectors to 2D arrays to keep the logic of the internal
    # _multi_dot_* functions as simple as possible.
    if arrays[0].ndim == 1:
        arrays[0] = atleast_2d(arrays[0])
    if arrays[-1].ndim == 1:
        arrays[-1] = atleast_2d(arrays[-1]).T
    _assertRank2(*arrays)
    # _multi_dot_three is much faster than _multi_dot_matrix_chain_order
    if n == 3:
        result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
    else:
        # Dynamic-programming search for the optimal parenthesization.
        order = _multi_dot_matrix_chain_order(arrays)
        result = _multi_dot(arrays, order, 0, n - 1)
    # return proper shape
    if ndim_first == 1 and ndim_last == 1:
        return result[0, 0]  # scalar
    elif ndim_first == 1 or ndim_last == 1:
        return result.ravel()  # 1-D
    else:
        return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return a np.array that encodes the optimal order of mutiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| mit |
siliconsmiley/QGIS | python/pyplugin_installer/qgsplugininstallerpluginerrordialog.py | 11 | 1767 | # -*- coding:utf-8 -*-
"""
/***************************************************************************
qgsplugininstallerpluginerrordialog.py
Plugin Installer module
-------------------
Date : June 2013
Copyright : (C) 2013 by Borys Jurgiel
Email : info at borysjurgiel dot pl
This module is based on former plugin_installer plugin:
Copyright (C) 2007-2008 Matthew Perry
Copyright (C) 2008-2013 Borys Jurgiel
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtGui import QDialog
from ui_qgsplugininstallerpluginerrorbase import Ui_QgsPluginInstallerPluginErrorDialogBase
class QgsPluginInstallerPluginErrorDialog(QDialog, Ui_QgsPluginInstallerPluginErrorDialogBase):
    """Modal dialog that displays the error message reported by a broken plugin."""
    # ----------------------------------------- #
    def __init__(self, parent, errorMessage):
        """Build the dialog and populate it with *errorMessage*.

        :param parent: parent widget for the dialog (may be None)
        :param errorMessage: text describing the plugin error; when empty
            or None a translated placeholder message is shown instead
        """
        QDialog.__init__(self, parent)
        self.setupUi(self)
        # Never leave the browser empty: fall back to a translated placeholder.
        if not errorMessage:
            errorMessage = self.tr("no error message received")
        self.textBrowser.setText(errorMessage)
| gpl-2.0 |
tunneln/CarnotKE | jyhton/lib-python/2.7/unittest/suite.py | 243 | 9809 | """TestSuite"""
import sys
from . import case
from . import util
__unittest = True
def _call_if_exists(parent, attr):
func = getattr(parent, attr, lambda: None)
func()
class BaseTestSuite(object):
    """A simple test suite that doesn't provide class or module shared fixtures.
    """
    def __init__(self, tests=()):
        # Tests are validated and stored in insertion order.
        self._tests = []
        self.addTests(tests)

    def __repr__(self):
        return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return list(self) == list(other)

    def __ne__(self, other):
        return not self == other

    # Can't guarantee hash invariant, so flag as unhashable
    __hash__ = None

    def __iter__(self):
        return iter(self._tests)

    def countTestCases(self):
        """Total number of leaf test cases across all contained tests."""
        return sum(t.countTestCases() for t in self)

    def addTest(self, test):
        """Append a single test after sanity-checking it."""
        if not hasattr(test, '__call__'):
            raise TypeError("{} is not callable".format(repr(test)))
        if isinstance(test, type) and issubclass(test,
                                                 (case.TestCase, TestSuite)):
            raise TypeError("TestCases and TestSuites must be instantiated "
                            "before passing them to addTest()")
        self._tests.append(test)

    def addTests(self, tests):
        """Append every test from the iterable *tests* (strings rejected)."""
        if isinstance(tests, basestring):
            raise TypeError("tests must be an iterable of tests, not a string")
        for item in tests:
            self.addTest(item)

    def run(self, result):
        """Run each contained test against *result*, honouring shouldStop."""
        for t in self:
            if result.shouldStop:
                break
            t(result)
        return result

    def __call__(self, *args, **kwds):
        return self.run(*args, **kwds)

    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        for t in self:
            t.debug()
class TestSuite(BaseTestSuite):
    """A test suite is a composite test consisting of a number of TestCases.
    For use, create an instance of TestSuite, then add test case instances.
    When all tests have been added, the suite can be passed to a test
    runner, such as TextTestRunner. It will run the individual test cases
    in the order in which they were added, aggregating the results. When
    subclassing, do not forget to call the base class constructor.
    """
    def run(self, result, debug=False):
        """Run the tests, driving setUpClass/setUpModule (and matching
        teardowns) as the class/module of consecutive tests changes."""
        # Only the outermost suite performs the final fixture teardown.
        topLevel = False
        if getattr(result, '_testRunEntered', False) is False:
            result._testRunEntered = topLevel = True
        for test in self:
            if result.shouldStop:
                break
            if _isnotsuite(test):
                # A concrete test: perform any fixture transitions first.
                self._tearDownPreviousClass(test, result)
                self._handleModuleFixture(test, result)
                self._handleClassSetUp(test, result)
                result._previousTestClass = test.__class__
                # Skip tests whose class-level or module-level setup failed.
                if (getattr(test.__class__, '_classSetupFailed', False) or
                    getattr(result, '_moduleSetUpFailed', False)):
                    continue
            if not debug:
                test(result)
            else:
                test.debug()
        if topLevel:
            self._tearDownPreviousClass(None, result)
            self._handleModuleTearDown(result)
            result._testRunEntered = False
        return result
    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        debug = _DebugResult()
        self.run(debug, True)
    ################################
    def _handleClassSetUp(self, test, result):
        """Call setUpClass of test's class when entering a new class."""
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if result._moduleSetUpFailed:
            return
        if getattr(currentClass, "__unittest_skip__", False):
            return
        try:
            currentClass._classSetupFailed = False
        except TypeError:
            # test may actually be a function
            # so its class will be a builtin-type
            pass
        setUpClass = getattr(currentClass, 'setUpClass', None)
        if setUpClass is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                setUpClass()
            except Exception as e:
                # In debug mode fixture errors propagate immediately.
                if isinstance(result, _DebugResult):
                    raise
                currentClass._classSetupFailed = True
                className = util.strclass(currentClass)
                errorName = 'setUpClass (%s)' % className
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')
    def _get_previous_module(self, result):
        """Return the module name of the previously-run test class, or None."""
        previousModule = None
        previousClass = getattr(result, '_previousTestClass', None)
        if previousClass is not None:
            previousModule = previousClass.__module__
        return previousModule
    def _handleModuleFixture(self, test, result):
        """Tear down the previous module and run setUpModule when the
        current test belongs to a different module."""
        previousModule = self._get_previous_module(result)
        currentModule = test.__class__.__module__
        if currentModule == previousModule:
            return
        self._handleModuleTearDown(result)
        result._moduleSetUpFailed = False
        try:
            module = sys.modules[currentModule]
        except KeyError:
            return
        setUpModule = getattr(module, 'setUpModule', None)
        if setUpModule is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                setUpModule()
            except Exception, e:
                if isinstance(result, _DebugResult):
                    raise
                result._moduleSetUpFailed = True
                errorName = 'setUpModule (%s)' % currentModule
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')
    def _addClassOrModuleLevelException(self, result, exception, errorName):
        """Record a fixture-level failure against a synthetic _ErrorHolder."""
        error = _ErrorHolder(errorName)
        addSkip = getattr(result, 'addSkip', None)
        if addSkip is not None and isinstance(exception, case.SkipTest):
            # SkipTest raised inside a fixture is reported as a skip.
            addSkip(error, str(exception))
        else:
            result.addError(error, sys.exc_info())
    def _handleModuleTearDown(self, result):
        """Call tearDownModule of the previously-run module, if any."""
        previousModule = self._get_previous_module(result)
        if previousModule is None:
            return
        if result._moduleSetUpFailed:
            return
        try:
            module = sys.modules[previousModule]
        except KeyError:
            return
        tearDownModule = getattr(module, 'tearDownModule', None)
        if tearDownModule is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                tearDownModule()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                errorName = 'tearDownModule (%s)' % previousModule
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')
    def _tearDownPreviousClass(self, test, result):
        """Call tearDownClass of the previous class when the class changes
        (skipped if that class's setup failed or was skipped)."""
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if getattr(previousClass, '_classSetupFailed', False):
            return
        if getattr(result, '_moduleSetUpFailed', False):
            return
        if getattr(previousClass, "__unittest_skip__", False):
            return
        tearDownClass = getattr(previousClass, 'tearDownClass', None)
        if tearDownClass is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                tearDownClass()
            except Exception, e:
                if isinstance(result, _DebugResult):
                    raise
                className = util.strclass(previousClass)
                errorName = 'tearDownClass (%s)' % className
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')
class _ErrorHolder(object):
"""
Placeholder for a TestCase inside a result. As far as a TestResult
is concerned, this looks exactly like a unit test. Used to insert
arbitrary errors into a test suite run.
"""
# Inspired by the ErrorHolder from Twisted:
# http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py
# attribute used by TestResult._exc_info_to_string
failureException = None
def __init__(self, description):
self.description = description
def id(self):
return self.description
def shortDescription(self):
return None
def __repr__(self):
return "<ErrorHolder description=%r>" % (self.description,)
def __str__(self):
return self.id()
def run(self, result):
# could call result.addError(...) - but this test-like object
# shouldn't be run anyway
pass
def __call__(self, result):
return self.run(result)
def countTestCases(self):
return 0
def _isnotsuite(test):
"A crude way to tell apart testcases and suites with duck-typing"
try:
iter(test)
except TypeError:
return True
return False
class _DebugResult(object):
"Used by the TestSuite to hold previous class when running in debug."
_previousTestClass = None
_moduleSetUpFailed = False
shouldStop = False
| apache-2.0 |
s-store/sstore-soft | third_party/python/boto/ec2/reservedinstance.py | 31 | 4028 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.ec2.ec2object import EC2Object
class ReservedInstancesOffering(EC2Object):
    """An EC2 Reserved Instances offering, as returned by
    DescribeReservedInstancesOfferings."""

    def __init__(self, connection=None, id=None, instance_type=None,
                 availability_zone=None, duration=None, fixed_price=None,
                 usage_price=None, description=None):
        EC2Object.__init__(self, connection)
        self.id = id
        self.instance_type = instance_type
        self.availability_zone = availability_zone
        self.duration = duration
        self.fixed_price = fixed_price
        self.usage_price = usage_price
        self.description = description

    def __repr__(self):
        return 'ReservedInstanceOffering:%s' % self.id

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        """Map a response XML element onto the matching attribute.

        Unknown element names fall through to a generic setattr so new
        API fields are still captured.
        """
        if name == 'reservedInstancesOfferingId':
            self.id = value
        elif name == 'instanceType':
            self.instance_type = value
        elif name == 'availabilityZone':
            self.availability_zone = value
        elif name == 'duration':
            self.duration = value
        elif name == 'fixedPrice':
            self.fixed_price = value
        elif name == 'usagePrice':
            self.usage_price = value
        elif name == 'productDescription':
            self.description = value
        else:
            setattr(self, name, value)

    def describe(self):
        """Print a human-readable summary of this offering.

        BUGFIX: the original used Python-2-only ``print x`` statements;
        single-argument ``print(...)`` calls behave identically on
        Python 2 and Python 3.
        """
        print('ID=%s' % self.id)
        print('\tInstance Type=%s' % self.instance_type)
        print('\tZone=%s' % self.availability_zone)
        print('\tDuration=%s' % self.duration)
        print('\tFixed Price=%s' % self.fixed_price)
        print('\tUsage Price=%s' % self.usage_price)
        print('\tDescription=%s' % self.description)

    def purchase(self, instance_count=1):
        # Delegate the actual purchase call to the EC2 connection.
        return self.connection.purchase_reserved_instance_offering(self.id, instance_count)
class ReservedInstance(ReservedInstancesOffering):
    """A purchased EC2 Reserved Instance, as returned by
    DescribeReservedInstances."""

    def __init__(self, connection=None, id=None, instance_type=None,
                 availability_zone=None, duration=None, fixed_price=None,
                 usage_price=None, description=None,
                 instance_count=None, state=None):
        ReservedInstancesOffering.__init__(self, connection, id, instance_type,
                                           availability_zone, duration, fixed_price,
                                           usage_price, description)
        self.instance_count = instance_count
        self.state = state

    def __repr__(self):
        return 'ReservedInstance:%s' % self.id

    def endElement(self, name, value, connection):
        # BUGFIX: this used to be two separate ``if`` statements, so a
        # 'reservedInstancesId' element also fell through to the parent's
        # catch-all setattr, leaving a stray ``reservedInstancesId``
        # attribute in addition to ``id``.  A single elif chain fixes that.
        if name == 'reservedInstancesId':
            self.id = value
        elif name == 'instanceCount':
            self.instance_count = int(value)
        elif name == 'state':
            self.state = value
        else:
            ReservedInstancesOffering.endElement(self, name, value, connection)
| gpl-3.0 |
mumer92/AFE | internals/urlgrabber/mirror.py | 4 | 17958 | # This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
"""Module for downloading files from a pool of mirrors
DESCRIPTION
This module provides support for downloading files from a pool of
mirrors with configurable failover policies. To a large extent, the
failover policy is chosen by using different classes derived from
the main class, MirrorGroup.
Instances of MirrorGroup (and cousins) act very much like URLGrabber
instances in that they have urlread, urlgrab, and urlopen methods.
They can therefore, be used in very similar ways.
from urlgrabber.grabber import URLGrabber
from urlgrabber.mirror import MirrorGroup
gr = URLGrabber()
mg = MirrorGroup(gr, ['http://foo.com/some/directory/',
'http://bar.org/maybe/somewhere/else/',
'ftp://baz.net/some/other/place/entirely/']
mg.urlgrab('relative/path.zip')
The assumption is that all mirrors are identical AFTER the base urls
specified, so that any mirror can be used to fetch any file.
FAILOVER
The failover mechanism is designed to be customized by subclassing
from MirrorGroup to change the details of the behavior. In general,
the classes maintain a master mirror list and a "current mirror"
index. When a download is initiated, a copy of this list and index
is created for that download only. The specific failover policy
depends on the class used, and so is documented in the class
documentation. Note that ANY behavior of the class can be
overridden, so any failover policy at all is possible (although
you may need to change the interface in extreme cases).
CUSTOMIZATION
Most customization of a MirrorGroup object is done at instantiation
time (or via subclassing). There are four major types of
customization:
1) Pass in a custom urlgrabber - The passed in urlgrabber will be
used (by default... see #2) for the grabs, so options to it
apply for the url-fetching
2) Custom mirror list - Mirror lists can simply be a list of
stings mirrors (as shown in the example above) but each can
also be a dict, allowing for more options. For example, the
first mirror in the list above could also have been:
{'mirror': 'http://foo.com/some/directory/',
'grabber': <a custom grabber to be used for this mirror>,
'kwargs': { <a dict of arguments passed to the grabber> }}
All mirrors are converted to this format internally. If
'grabber' is omitted, the default grabber will be used. If
kwargs are omitted, then (duh) they will not be used.
3) Pass keyword arguments when instantiating the mirror group.
See, for example, the failure_callback argument.
4) Finally, any kwargs passed in for the specific file (to the
urlgrab method, for example) will be folded in. The options
passed into the grabber's urlXXX methods will override any
options specified in a custom mirror dict.
"""
import random
import thread # needed for locking to make this threadsafe
from grabber import URLGrabError, CallbackObject, DEBUG
def _(st):
    # gettext-style marker: no translation catalog is wired up, so the
    # message text is returned unchanged.
    return st
class GrabRequest:
    """This is a dummy class used to hold information about the specific
    request.  For example, a single file.  By maintaining this information
    separately, we can accomplish two things:

      1) make it a little easier to be threadsafe
      2) have request-specific parameters

    Attributes (func, url, kw, mirrors, _next) are attached dynamically
    by MirrorGroup._mirror_try and MirrorGroup._load_gr.
    """
    pass
class MirrorGroup:
    """Base Mirror class

    Instances of this class are built with a grabber object and a list
    of mirrors.  Then all calls to urlXXX should be passed relative urls.
    The requested file will be searched for on the first mirror.  If the
    grabber raises an exception (possibly after some retries) then that
    mirror will be removed from the list, and the next will be attempted.
    If all mirrors are exhausted, then an exception will be raised.

    MirrorGroup has the following failover policy:

      * downloads begin with the first mirror

      * by default (see default_action below) a failure (after retries)
        causes it to increment the local AND master indices.  Also, the
        current mirror is removed from the local list (but NOT the master
        list - the mirror can potentially be used for other files)

      * if the local list is ever exhausted, a URLGrabError will be
        raised (errno=256, no more mirrors)

    OPTIONS

    In addition to the required arguments "grabber" and "mirrors",
    MirrorGroup also takes the following optional arguments:

      default_action

        A dict that describes the actions to be taken upon failure
        (after retries).  default_action can contain any of the
        following keys (shown here with their default values):

          default_action = {'increment': 1,
                            'increment_master': 1,
                            'remove': 1,
                            'remove_master': 0,
                            'fail': 0}

        In this context, 'increment' means "use the next mirror" and
        'remove' means "never use this mirror again".  The two 'master'
        values refer to the instance-level mirror list (used for all
        files), whereas the non-master values refer to the current
        download only.

        The 'fail' option will cause immediate failure by re-raising
        the exception and no further attempts to get the current
        download.

        This dict can be set at instantiation time,

          mg = MirrorGroup(grabber, mirrors, default_action={'fail': 1})

        at method-execution time (only applies to current fetch),

          filename = mg.urlgrab(url, default_action={'increment': 0})

        or by returning an action dict from the failure_callback

          return {'fail': 0}

        in increasing precedence.

        If all three of these were done, the net result would be:

              {'increment': 0,         # set in method
               'increment_master': 1,  # class default
               'remove': 1,            # class default
               'remove_master': 0,     # class default
               'fail': 0}              # set at instantiation, reset
                                       # from callback

      failure_callback

        this is a callback that will be called when a mirror "fails",
        meaning the grabber raises some URLGrabError.  If this is a
        tuple, it is interpreted to be of the form (cb, args, kwargs)
        where cb is the actual callable object (function, method,
        etc).  Otherwise, it is assumed to be the callable object
        itself.  The callback will be passed a grabber.CallbackObject
        instance along with args and kwargs (if present).  The
        following attributes are defined within the instance:

           obj.exception    = < exception that was raised >
           obj.mirror       = < the mirror that was tried >
           obj.relative_url = < url relative to the mirror >
           obj.url          = < full url that failed >
                              # .url is just the combination of .mirror
                              # and .relative_url

        The failure callback can return an action dict, as described
        above.

        Like default_action, the failure_callback can be set at
        instantiation time or when the urlXXX method is called.  In the
        latter case, it applies only for that fetch.

        The callback can re-raise the exception quite easily.  For
        example, this is a perfectly adequate callback function:

          def callback(obj): raise obj.exception

        WARNING: do not save the exception object (or the
        CallbackObject instance).  As they contain stack frame
        references, they can lead to circular references.

    Notes:
      * The behavior can be customized by deriving and overriding the
        'CONFIGURATION METHODS'
      * The 'grabber' instance is kept as a reference, not copied.
        Therefore, the grabber instance can be modified externally and
        changes will take effect immediately.
    """

    # notes on thread-safety:

    #   A GrabRequest should never be shared by multiple threads because
    #   it's never saved inside the MG object and never returned outside it.
    #   therefore, it should be safe to access/modify grabrequest data
    #   without a lock.  However, accessing the mirrors and _next attributes
    #   of the MG itself must be done when locked to prevent (for example)
    #   removal of the wrong mirror.

    ##############################################################
    #  CONFIGURATION METHODS  -  intended to be overridden to
    #                            customize behavior
    def __init__(self, grabber, mirrors, **kwargs):
        """Initialize the MirrorGroup object.

        REQUIRED ARGUMENTS

          grabber  - URLGrabber instance
          mirrors  - a list of mirrors

        OPTIONAL ARGUMENTS

          failure_callback  - callback to be used when a mirror fails
          default_action    - dict of failure actions

        See the module-level and class level documentation for more
        details.
        """
        # OVERRIDE IDEAS:
        #   shuffle the list to randomize order
        self.grabber = grabber
        self.mirrors = self._parse_mirrors(mirrors)
        self._next = 0
        self._lock = thread.allocate_lock()
        self.default_action = None
        self._process_kwargs(kwargs)

    # if these values are found in **kwargs passed to one of the urlXXX
    # methods, they will be stripped before getting passed on to the
    # grabber
    options = ['default_action', 'failure_callback']

    def _process_kwargs(self, kwargs):
        # Only the keys listed in self.options are meaningful here.
        self.failure_callback = kwargs.get('failure_callback')
        self.default_action = kwargs.get('default_action')

    def _parse_mirrors(self, mirrors):
        # Normalize each mirror to a dict with at least a 'mirror' key;
        # optional keys are 'grabber' and 'kwargs'.
        parsed_mirrors = []
        for m in mirrors:
            if type(m) == type(''): m = {'mirror': m}
            parsed_mirrors.append(m)
        return parsed_mirrors

    def _load_gr(self, gr):
        # OVERRIDE IDEAS:
        #   shuffle gr list
        self._lock.acquire()
        gr.mirrors = list(self.mirrors)
        gr._next = self._next
        self._lock.release()

    def _get_mirror(self, gr):
        # OVERRIDE IDEAS:
        #   return a random mirror so that multiple mirrors get used
        #   even without failures.
        if not gr.mirrors:
            raise URLGrabError(256, _('No more mirrors to try.'))
        return gr.mirrors[gr._next]

    def _failure(self, gr, cb_obj):
        # OVERRIDE IDEAS:
        #   inspect the error - remove=1 for 404, remove=2 for connection
        #     refused, etc. (this can also be done via
        #     the callback)
        cb = gr.kw.get('failure_callback') or self.failure_callback
        if cb:
            if type(cb) == type( () ):
                cb, args, kwargs = cb
            else:
                args, kwargs = (), {}
            action = cb(cb_obj, *args, **kwargs) or {}
        else:
            action = {}

        # XXXX - decide - there are two ways to do this
        # the first is action-overriding as a whole - use the entire action
        # or fall back on module level defaults
        #action = action or gr.kw.get('default_action') or self.default_action
        # the other is to fall through for each element in the action dict
        a = dict(self.default_action or {})
        a.update(gr.kw.get('default_action', {}))
        a.update(action)
        action = a
        self.increment_mirror(gr, action)
        # bare raise re-raises the URLGrabError currently being handled
        if action and action.get('fail', 0): raise

    def increment_mirror(self, gr, action=None):
        """Tell the mirror object increment the mirror index

        This increments the mirror index, which amounts to telling the
        mirror object to use a different mirror (for this and future
        downloads).

        This is a SEMI-public method.  It will be called internally,
        and you may never need to call it.  However, it is provided
        (and is made public) so that the calling program can increment
        the mirror choice for methods like urlopen.  For example, with
        urlopen, there's no good way for the mirror group to know that
        an error occurs mid-download (it's already returned and given
        you the file object).

        remove  ---  can have several values
          0   do not remove the mirror from the list
          1   remove the mirror for this download only
          2   remove the mirror permanently

        beware of remove=0 as it can lead to infinite loops
        """
        # BUGFIX: the default used to be a shared mutable dict (action={}).
        if action is None:
            action = {}
        badmirror = gr.mirrors[gr._next]

        self._lock.acquire()
        try:
            ind = self.mirrors.index(badmirror)
        except ValueError:
            pass
        else:
            if action.get('remove_master', 0):
                del self.mirrors[ind]
            elif self._next == ind and action.get('increment_master', 1):
                self._next += 1
            if self._next >= len(self.mirrors): self._next = 0
        self._lock.release()

        if action.get('remove', 1):
            del gr.mirrors[gr._next]
        elif action.get('increment', 1):
            gr._next += 1
        # BUGFIX: wrap after *either* branch above; previously only the
        # increment branch wrapped, so removing the last mirror in the list
        # could leave _next out of range and make _get_mirror raise
        # IndexError instead of failing over cleanly.
        if gr._next >= len(gr.mirrors): gr._next = 0

        if DEBUG:
            grm = [m['mirror'] for m in gr.mirrors]
            DEBUG.info('GR   mirrors: [%s] %i', ' '.join(grm), gr._next)
            selfm = [m['mirror'] for m in self.mirrors]
            DEBUG.info('MAIN mirrors: [%s] %i', ' '.join(selfm), self._next)

    #####################################################################
    # NON-CONFIGURATION METHODS
    # these methods are designed to be largely workhorse methods that
    # are not intended to be overridden.  That doesn't mean you can't;
    # if you want to, feel free, but most things can be done by
    # by overriding the configuration methods :)

    def _join_url(self, base_url, rel_url):
        # Avoid doubling or dropping the '/' between base and relative parts.
        if base_url.endswith('/') or rel_url.startswith('/'):
            return base_url + rel_url
        else:
            return base_url + '/' + rel_url

    def _mirror_try(self, func, url, kw):
        # Build a per-request state object so retries over the mirror list
        # don't mutate the shared instance state (see thread-safety notes).
        gr = GrabRequest()
        gr.func = func
        gr.url = url
        gr.kw = dict(kw)
        self._load_gr(gr)

        # Strip MirrorGroup-only options before handing kw to the grabber.
        for k in self.options:
            try: del kw[k]
            except KeyError: pass

        while 1:
            mirrorchoice = self._get_mirror(gr)
            fullurl = self._join_url(mirrorchoice['mirror'], gr.url)
            kwargs = dict(mirrorchoice.get('kwargs', {}))
            kwargs.update(kw)
            grabber = mirrorchoice.get('grabber') or self.grabber
            func_ref = getattr(grabber, func)
            if DEBUG: DEBUG.info('MIRROR: trying %s -> %s', url, fullurl)
            try:
                return func_ref( *(fullurl,), **kwargs )
            # "except X as e" works on python 2.6+ and python 3, unlike
            # the old "except X, e" form used previously.
            except URLGrabError as e:
                if DEBUG: DEBUG.info('MIRROR: failed')
                obj = CallbackObject()
                obj.exception = e
                obj.mirror = mirrorchoice['mirror']
                obj.relative_url = gr.url
                obj.url = fullurl
                # _failure either increments/removes and returns (loop
                # retries) or re-raises when the action says 'fail'.
                self._failure(gr, obj)

    def urlgrab(self, url, filename=None, **kwargs):
        kw = dict(kwargs)
        kw['filename'] = filename
        func = 'urlgrab'
        return self._mirror_try(func, url, kw)

    def urlopen(self, url, **kwargs):
        kw = dict(kwargs)
        func = 'urlopen'
        return self._mirror_try(func, url, kw)

    def urlread(self, url, limit=None, **kwargs):
        kw = dict(kwargs)
        kw['limit'] = limit
        func = 'urlread'
        return self._mirror_try(func, url, kw)
class MGRandomStart(MirrorGroup):
    """A mirror group that starts at a random mirror in the list.

    This behavior of this class is identical to MirrorGroup, except that
    it starts at a random location in the mirror list.
    """

    def __init__(self, grabber, mirrors, **kwargs):
        """Initialize the object

        The arguments for intialization are the same as for MirrorGroup
        """
        MirrorGroup.__init__(self, grabber, mirrors, **kwargs)
        # Pick a random starting index.  NOTE(review): random.randrange
        # raises ValueError when the mirror list is empty -- presumably
        # callers always supply at least one mirror; confirm.
        self._next = random.randrange(len(mirrors))
class MGRandomOrder(MirrorGroup):
    """A mirror group that uses mirrors in a random order.

    This behavior of this class is identical to MirrorGroup, except that
    it uses the mirrors in a random order.  Note that the order is set at
    initialization time and fixed thereafter.  That is, it does not pick a
    random mirror after each failure.
    """

    def __init__(self, grabber, mirrors, **kwargs):
        """Initialize the object

        The arguments for intialization are the same as for MirrorGroup
        """
        MirrorGroup.__init__(self, grabber, mirrors, **kwargs)
        # Shuffle in place; self.mirrors is this instance's own parsed copy.
        random.shuffle(self.mirrors)
if __name__ == '__main__':
pass
| gpl-3.0 |
shollen/evennia | evennia/contrib/tutorial_examples/red_button_scripts.py | 6 | 10290 | """
Example of scripts.
These are scripts intended for a particular object - the
red_button object type in contrib/examples. A few variations
on uses of scripts are included.
"""
from evennia import DefaultScript
from evennia.contrib.tutorial_examples import cmdset_red_button as cmdsetexamples
#
# Scripts as state-managers
#
# Scripts have many uses, one of which is to statically
# make changes when a particular state of an object changes.
# There is no "timer" involved in this case (although there could be),
# whenever the script determines it is "invalid", it simply shuts down
# along with all the things it controls.
#
# To show as many features as possible of the script and cmdset systems,
# we will use three scripts controlling one state each of the red_button,
# each with its own set of commands, handled by cmdsets - one for when
# the button has its lid open, and one for when it is closed and a
# last one for when the player pushed the button and gets blinded by
# a bright light. The last one also has a timer component that allows it
# to remove itself after a while (and the player recovers their eyesight).
class ClosedLidState(DefaultScript):
    """
    This manages the cmdset for the "closed" button state. What this
    means is that while this script is valid, we add the RedButtonClosed
    cmdset to it (with commands like open, nudge lid etc)
    """
    def at_script_creation(self):
        "Called when script first created."
        self.desc = "Script that manages the closed-state cmdsets for red button."
        # Survive server reboots: the lid should stay closed across restarts.
        self.persistent = True

    def at_start(self):
        """
        This is called once every server restart, so we want to add the
        (memory-resident) cmdset to the object here. is_valid is automatically
        checked so we don't need to worry about adding the script to an
        open lid.
        """
        #All we do is add the cmdset for the closed state.
        self.obj.cmdset.add(cmdsetexamples.LidClosedCmdSet)

    def is_valid(self):
        """
        The script is only valid while the lid is closed.
        self.obj is the red_button on which this script is defined.
        """
        return not self.obj.db.lid_open

    def at_stop(self):
        """
        When the script stops we must make sure to clean up after us.
        """
        # Remove the closed-state commands so they don't linger on the button.
        self.obj.cmdset.delete(cmdsetexamples.LidClosedCmdSet)
class OpenLidState(DefaultScript):
    """
    This manages the cmdset for the "open" button state. This will add
    the RedButtonOpen
    """
    def at_script_creation(self):
        "Called when script first created."
        self.desc = "Script that manages the opened-state cmdsets for red button."
        # Survive server reboots: the lid should stay open across restarts.
        self.persistent = True

    def at_start(self):
        """
        This is called once every server restart, so we want to add the
        (memory-resident) cmdset to the object here. is_valid is
        automatically checked, so we don't need to worry about
        adding the cmdset to a closed lid-button.
        """
        # Grant the open-lid commands to the button.
        self.obj.cmdset.add(cmdsetexamples.LidOpenCmdSet)

    def is_valid(self):
        """
        The script is only valid while the lid is open.
        self.obj is the red_button on which this script is defined.
        """
        return self.obj.db.lid_open

    def at_stop(self):
        """
        When the script stops (like if the lid is closed again)
        we must make sure to clean up after us.
        """
        self.obj.cmdset.delete(cmdsetexamples.LidOpenCmdSet)
class BlindedState(DefaultScript):
    """
    This is a timed state.

    This adds a (very limited) cmdset TO THE PLAYER, during a certain time,
    after which the script will close and all functions are
    restored. It's up to the function starting the script to actually
    set it on the right player object.
    """
    def at_script_creation(self):
        """
        We set up the script here.
        """
        self.key = "temporary_blinder"
        self.desc = "Temporarily blinds the player for a little while."
        self.interval = 20  # seconds
        self.start_delay = True  # we don't want it to stop until after 20s.
        self.repeats = 1  # this will go away after interval seconds.
        self.persistent = False  # we will ditch this if server goes down

    def at_start(self):
        """
        We want to add the cmdset to the linked object.

        Note that the RedButtonBlind cmdset is defined to completly
        replace the other cmdsets on the stack while it is active
        (this means that while blinded, only operations in this cmdset
        will be possible for the player to perform). It is however
        not persistent, so should there be a bug in it, we just need
        to restart the server to clear out of it during development.
        """
        self.obj.cmdset.add(cmdsetexamples.BlindCmdSet)

    def at_stop(self):
        """
        It's important that we clear out that blinded cmdset
        when we are done!
        """
        # Tell the blinded player, then everyone else in the room.
        self.obj.msg("You blink feverishly as your eyesight slowly returns.")
        self.obj.location.msg_contents("%s seems to be recovering their eyesight."
                                       % self.obj.name,
                                       exclude=self.obj)
        self.obj.cmdset.delete()  # this will clear the latest added cmdset,
                                  # (which is the blinded one).
#
# Timer/Event-like Scripts
#
# Scripts can also work like timers, or "events". Below we
# define three such timed events that makes the button a little
# more "alive" - one that makes the button blink menacingly, another
# that makes the lid covering the button slide back after a while.
#
class CloseLidEvent(DefaultScript):
    """
    This event closes the glass lid over the button
    some time after it was opened. It's a one-off
    script that should be started/created when the
    lid is opened.
    """
    def at_script_creation(self):
        """
        Called when script object is first created. Sets things up.
        We want to have a lid on the button that the user can pull
        aside in order to make the button 'pressable'. But after a set
        time that lid should auto-close again, making the button safe
        from pressing (and deleting this command).
        """
        self.key = "lid_closer"
        self.desc = "Closes lid on a red buttons"
        self.interval = 20  # seconds
        self.start_delay = True  # we want to pospone the launch.
        self.repeats = 1  # we only close the lid once
        self.persistent = True  # even if the server crashes in those 20 seconds,
                                # the lid will still close once the game restarts.

    def is_valid(self):
        """
        This script can only operate if the lid is open; if it
        is already closed, the script is clearly invalid.

        Note that we are here relying on an self.obj being
        defined (and being a RedButton object) - this we should be able to
        expect since this type of script is always tied to one individual
        red button object and not having it would be an error.
        """
        return self.obj.db.lid_open

    def at_repeat(self):
        """
        Called after self.interval seconds. It closes the lid. Before this method is
        called, self.is_valid() is automatically checked, so there is no need to
        check this manually.
        """
        self.obj.close_lid()
class BlinkButtonEvent(DefaultScript):
    """
    This timed script lets the button flash at regular intervals.
    """
    def at_script_creation(self):
        """
        Sets things up. We want the button's lamp to blink at
        regular intervals, unless it's broken (can happen
        if you try to smash the glass, say).
        """
        self.key = "blink_button"
        self.desc = "Blinks red buttons"
        self.interval = 35  # seconds
        self.start_delay = False  # blink right away
        self.persistent = True  # keep blinking also after server reboot

    def is_valid(self):
        """
        Button will keep blinking unless it is broken.
        """
        return self.obj.db.lamp_works

    def at_repeat(self):
        """
        Called every self.interval seconds. Makes the lamp in
        the button blink.
        """
        self.obj.blink()
class DeactivateButtonEvent(DefaultScript):
    """
    This deactivates the button for a short while (it won't blink, won't
    close its lid etc). It is meant to be called when the button is pushed
    and run as long as the blinded effect lasts. We cannot put these methods
    in the AddBlindedCmdSet script since that script is defined on the *player*
    whereas this one must be defined on the *button*.
    """
    def at_script_creation(self):
        """
        Sets things up.
        """
        self.key = "deactivate_button"
        self.desc = "Deactivate red button temporarily"
        self.interval = 21  # seconds; just outlasts BlindedState's 20s effect
        self.start_delay = True  # wait with the first repeat for self.interval seconds.
        self.persistent = True
        self.repeats = 1  # only do this once

    def at_start(self):
        """
        Deactivate the button. Observe that this method is always
        called directly, regardless of the value of self.start_delay
        (that just controls when at_repeat() is called)
        """
        # closing the lid will also add the ClosedState script
        self.obj.close_lid()
        # lock the lid so other players can't access it until the
        # first one's effect has worn off.
        self.obj.db.lid_locked = True
        # breaking the lamp also sets a correct desc
        self.obj.break_lamp(feedback=False)

    def at_repeat(self):
        """
        When this is called, reset the functionality of the button.
        """
        # restore button's desc.
        self.obj.db.lamp_works = True
        desc = "This is a large red button, inviting yet evil-looking. "
        desc += "Its glass cover is closed, protecting it."
        # BUGFIX: the description belongs to the button (self.obj), not to
        # this script object; the original assigned ``self.db.desc`` and so
        # never actually restored the button's description.
        self.obj.db.desc = desc
        # re-activate the blink button event.
        self.obj.scripts.add(BlinkButtonEvent)
        # unlock the lid
        self.obj.db.lid_locked = False
        self.obj.scripts.validate()
| bsd-3-clause |
yoshinorim/mysql-5.6 | xtrabackup/test/kewpie/percona_tests/xtrabackup_disabled/ib_databases_test_disabled.py | 24 | 5057 | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import shutil
import time
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
server_requirements = [[],[]]
servers = []
server_manager = None
test_executor = None
# we explicitly use the --no-timestamp option
# here. We will be using a generic / vanilla backup dir
backup_path = None
class basicTest(mysqlBaseTestCase):
    """Backs up a populated master with innobackupex, restores the backup
    into a second server's datadir and verifies the data survived."""

    def setUp(self):
        master_server = servers[0]  # assumption that this is 'master'
        backup_path = os.path.join(master_server.vardir, '_xtrabackup')
        # remove backup path so every run starts from a clean slate
        if os.path.exists(backup_path):
            shutil.rmtree(backup_path)

    def test_basic1(self):
        self.servers = servers
        if servers[0].type not in ['mysql', 'percona']:
            return
        else:
            innobackupex = test_executor.system_manager.innobackupex_path
            xtrabackup = test_executor.system_manager.xtrabackup_path
            master_server = servers[0]  # assumption that this is 'master'
            copy_server = servers[1]
            backup_path = os.path.join(master_server.vardir, '_xtrabackup')
            output_path = os.path.join(master_server.vardir, 'innobackupex.out')
            exec_path = os.path.dirname(innobackupex)

            # populate our server with a test bed
            test_cmd = "./gentest.pl --gendata=conf/percona/percona.zz "
            retcode, output = self.execute_randgen(test_cmd, test_executor, master_server)

            # create additional schemas for backup
            schema_basename = 'test'
            for i in range(6):
                schema = schema_basename + str(i)
                query = "CREATE SCHEMA %s" % (schema)
                retcode, result_set = self.execute_query(query, master_server)
                # assertEquals is a deprecated unittest alias; use assertEqual
                self.assertEqual(retcode, 0, msg=result_set)
                retcode, output = self.execute_randgen(test_cmd, test_executor, master_server, schema)

            # take a backup
            cmd = ("%s --defaults-file=%s --user=root --port=%d"
                   " --host=127.0.0.1 --no-timestamp"
                   #" --databases='mysql,test,test2' "
                   " --ibbackup=%s %s" % (innobackupex
                                          , master_server.cnf_file
                                          , master_server.master_port
                                          , xtrabackup
                                          , backup_path))
            retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
            # assertEqual reports both values on mismatch, unlike
            # assertTrue(retcode==0)
            self.assertEqual(retcode, 0, output)

            # shutdown our server
            copy_server.stop()

            # prepare our backup
            cmd = ("%s --apply-log --no-timestamp --use-memory=500M "
                   "--ibbackup=%s %s" % (innobackupex
                                         , xtrabackup
                                         , backup_path))
            retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
            self.assertEqual(retcode, 0, output)

            # remove old datadir
            shutil.rmtree(copy_server.datadir)
            os.mkdir(copy_server.datadir)

            # restore from backup
            cmd = ("%s --defaults-file=%s --copy-back"
                   " --ibbackup=%s %s" % (innobackupex
                                          , copy_server.cnf_file
                                          , xtrabackup
                                          , backup_path))
            retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
            self.assertEqual(retcode, 0, output)

            # restart server (and ensure it doesn't crash)
            copy_server.start()
            # BUGFIX: it is copy_server that was restored and restarted;
            # the original asserted on master_server.status here, which
            # could never catch a failed restart of the restored server.
            self.assertEqual(copy_server.status, 1,
                             'Server failed restart from restored datadir...')

            # Check schemas copied / restored
            query = "SHOW SCHEMAS"
            retcode, result = self.execute_query(query, copy_server)
            # TODO have an actual check!

            # Check copy vs. orig
            comp_result = self.check_slaves_by_checksum(master_server, [copy_server],
                                                        schemas=['test', 'test2'])
            self.assertEqual(comp_result, None, comp_result)
| gpl-2.0 |
w1r0x/ansible | lib/ansible/new_inventory/__init__.py | 53 | 11203 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import sys
from ansible import constants as C
from ansible.inventory.group import Group
from .host import Host
from ansible.plugins.inventory.aggregate import InventoryAggregateParser
from ansible import errors
class Inventory:
'''
Create hosts and groups from inventory
Retrieve the hosts and groups that ansible knows about from this class.
Retrieve raw variables (non-expanded) from the Group and Host classes
returned from here.
'''
def __init__(self, inventory_list=C.DEFAULT_HOST_LIST):
    '''
    :kwarg inventory_list: A list of inventory sources. This may be file
        names which will be parsed as ini-like files, executable scripts
        which return inventory data as json, directories of both of the above,
        or hostnames. Files and directories are
    :kwarg vault_password: Password to use if any of the inventory sources
        are in an ansible vault
    '''
    # No host restriction and no filter pattern until explicitly set.
    self._restricted_to = None
    self._filter_pattern = None
    # Delegate parsing of all configured sources to the aggregate parser.
    parser = InventoryAggregateParser(inventory_list)
    parser.parse()
    self._basedir = parser.basedir
    self._hosts = parser.hosts
    self._groups = parser.groups
def get_hosts(self):
'''
Return the list of hosts, after filtering based on any set pattern
and restricting the results based on the set host restrictions.
'''
if self._filter_pattern:
hosts = self._filter_hosts()
else:
hosts = self._hosts[:]
if self._restricted_to is not None:
# this will preserve the order of hosts after intersecting them
res_set = set(hosts).intersection(self._restricted_to)
return [h for h in hosts if h in res_set]
else:
return hosts[:]
def get_groups(self):
'''
Retrieve the Group objects known to the Inventory
'''
return self._groups[:]
def get_host(self, hostname):
'''
Retrieve the Host object for a hostname
'''
for host in self._hosts:
if host.name == hostname:
return host
return None
def get_group(self, groupname):
'''
Retrieve the Group object for a groupname
'''
for group in self._groups:
if group.name == groupname:
return group
return None
def add_group(self, group):
'''
Add a new group to the inventory
'''
if group not in self._groups:
self._groups.append(group)
def set_filter_pattern(self, pattern='all'):
'''
Sets a pattern upon which hosts/groups will be filtered.
This pattern can contain logical groupings such as unions,
intersections and negations using special syntax.
'''
self._filter_pattern = pattern
def set_host_restriction(self, restriction):
'''
Restrict operations to hosts in the given list
'''
assert isinstance(restriction, list)
self._restricted_to = restriction[:]
def remove_host_restriction(self):
'''
Remove the restriction on hosts, if any.
'''
self._restricted_to = None
def _filter_hosts(self):
"""
Limits inventory results to a subset of inventory that matches a given
list of patterns, such as to select a subset of a hosts selection that also
belongs to a certain geographic group or numeric slice.
Corresponds to --limit parameter to ansible-playbook
:arg patterns: The pattern to limit with. If this is None it
clears the subset. Multiple patterns may be specified as a comma,
semicolon, or colon separated string.
"""
hosts = []
pattern_regular = []
pattern_intersection = []
pattern_exclude = []
patterns = self._pattern.replace(";",":").split(":")
for p in patterns:
if p.startswith("!"):
pattern_exclude.append(p)
elif p.startswith("&"):
pattern_intersection.append(p)
elif p:
pattern_regular.append(p)
# if no regular pattern was given, hence only exclude and/or intersection
# make that magically work
if pattern_regular == []:
pattern_regular = ['all']
# when applying the host selectors, run those without the "&" or "!"
# first, then the &s, then the !s.
patterns = pattern_regular + pattern_intersection + pattern_exclude
for p in patterns:
intersect = False
negate = False
if p.startswith('&'):
intersect = True
elif p.startswith('!'):
p = p[1:]
negate = True
target = self._resolve_pattern(p)
if isinstance(target, Host):
if negate and target in hosts:
# remove it
hosts.remove(target)
elif target not in hosts:
# for both union and intersections, we just append it
hosts.append(target)
else:
if intersect:
hosts = [ h for h in hosts if h not in target ]
elif negate:
hosts = [ h for h in hosts if h in target ]
else:
to_append = [ h for h in target if h.name not in [ y.name for y in hosts ] ]
hosts.extend(to_append)
return hosts
def _resolve_pattern(self, pattern):
target = self.get_host(pattern)
if target:
return target
else:
(name, enumeration_details) = self._enumeration_info(pattern)
hpat = self._hosts_in_unenumerated_pattern(name)
result = self._apply_ranges(pattern, hpat)
return result
def _enumeration_info(self, pattern):
"""
returns (pattern, limits) taking a regular pattern and finding out
which parts of it correspond to start/stop offsets. limits is
a tuple of (start, stop) or None
"""
# Do not parse regexes for enumeration info
if pattern.startswith('~'):
return (pattern, None)
# The regex used to match on the range, which can be [x] or [x-y].
pattern_re = re.compile("^(.*)\[([-]?[0-9]+)(?:(?:-)([0-9]+))?\](.*)$")
m = pattern_re.match(pattern)
if m:
(target, first, last, rest) = m.groups()
first = int(first)
if last:
if first < 0:
raise errors.AnsibleError("invalid range: negative indices cannot be used as the first item in a range")
last = int(last)
else:
last = first
return (target, (first, last))
else:
return (pattern, None)
def _apply_ranges(self, pat, hosts):
"""
given a pattern like foo, that matches hosts, return all of hosts
given a pattern like foo[0:5], where foo matches hosts, return the first 6 hosts
"""
# If there are no hosts to select from, just return the
# empty set. This prevents trying to do selections on an empty set.
# issue#6258
if not hosts:
return hosts
(loose_pattern, limits) = self._enumeration_info(pat)
if not limits:
return hosts
(left, right) = limits
if left == '':
left = 0
if right == '':
right = 0
left=int(left)
right=int(right)
try:
if left != right:
return hosts[left:right]
else:
return [ hosts[left] ]
except IndexError:
raise errors.AnsibleError("no hosts matching the pattern '%s' were found" % pat)
def _hosts_in_unenumerated_pattern(self, pattern):
""" Get all host names matching the pattern """
results = []
hosts = []
hostnames = set()
# ignore any negative checks here, this is handled elsewhere
pattern = pattern.replace("!","").replace("&", "")
def __append_host_to_results(host):
if host not in results and host.name not in hostnames:
hostnames.add(host.name)
results.append(host)
groups = self.get_groups()
for group in groups:
if pattern == 'all':
for host in group.get_hosts():
__append_host_to_results(host)
else:
if self._match(group.name, pattern):
for host in group.get_hosts():
__append_host_to_results(host)
else:
matching_hosts = self._match_list(group.get_hosts(), 'name', pattern)
for host in matching_hosts:
__append_host_to_results(host)
if pattern in ["localhost", "127.0.0.1"] and len(results) == 0:
new_host = self._create_implicit_localhost(pattern)
results.append(new_host)
return results
def _create_implicit_localhost(self, pattern):
new_host = Host(pattern)
new_host._connection = 'local'
new_host.set_variable("ansible_python_interpreter", sys.executable)
ungrouped = self.get_group("ungrouped")
if ungrouped is None:
self.add_group(Group('ungrouped'))
ungrouped = self.get_group('ungrouped')
self.get_group('all').add_child_group(ungrouped)
ungrouped.add_host(new_host)
return new_host
def is_file(self):
'''
Did inventory come from a file?
:returns: True if the inventory is file based, False otherwise
'''
pass
def src(self):
'''
What's the complete path to the inventory file?
:returns: Complete path to the inventory file. None if inventory is
not file-based
'''
pass
def basedir(self):
'''
What directory from which the inventory was read.
'''
return self._basedir
| gpl-3.0 |
edx/edx-platform | openedx/core/djangoapps/site_configuration/tests/test_models.py | 4 | 14171 | """
Tests for site configuration's django models.
"""
from unittest.mock import patch
import pytest
from django.contrib.sites.models import Site
from django.db import IntegrityError, transaction
from django.test import TestCase
from openedx.core.djangoapps.site_configuration.models import (
SiteConfiguration,
SiteConfigurationHistory,
save_siteconfig_without_historical_record
)
from openedx.core.djangoapps.site_configuration.tests.factories import SiteConfigurationFactory
class SiteConfigurationTests(TestCase):
    """
    Tests for SiteConfiguration and its signals/receivers.
    """
    domain = 'site_configuration_post_save_receiver_example.com'
    name = 'site_configuration_post_save_receiver_example'
    # Two distinct site-configuration fixtures used throughout the tests.
    test_config1 = {
        "university": "Test University",
        "platform_name": "Test Education Program",
        "SITE_NAME": "test.localhost",
        "course_org_filter": "TestX",
        "css_overrides_file": "test/css/site.css",
        "ENABLE_MKTG_SITE": False,
        "ENABLE_THIRD_PARTY_AUTH": False,
        "course_about_show_social_links": False,
        "favicon_path": "/static/test.ico",
    }
    test_config2 = {
        "university": "Test Another University",
        "platform_name": "Test Another Education Program",
        "SITE_NAME": "test-another.localhost",
        "course_org_filter": "TestAnotherX",
        "css_overrides_file": "test-another/css/site.css",
        "ENABLE_MKTG_SITE": True,
        "ENABLE_THIRD_PARTY_AUTH": True,
        "course_about_show_social_links": False,
        "favicon_path": "/static/test-another.ico",
    }
    @classmethod
    def setUpClass(cls):
        # Create the two Site objects that the configuration fixtures attach to.
        super().setUpClass()
        cls.site, _ = Site.objects.get_or_create(domain=cls.domain, name=cls.domain)
        cls.site2, _ = Site.objects.get_or_create(
            domain=cls.test_config2['SITE_NAME'],
            name=cls.test_config2['SITE_NAME'],
        )
    def test_site_configuration_post_save_receiver(self):
        """
        Test that an entry is added to the SiteConfigurationHistory model each
        time a new SiteConfiguration is added.
        """
        # add SiteConfiguration to database
        site_configuration = SiteConfigurationFactory.create(
            site=self.site,
        )
        # Verify an entry to SiteConfigurationHistory was added.
        site_configuration_history = SiteConfigurationHistory.objects.filter(
            site=site_configuration.site,
        ).all()
        # Make sure an entry (and only one entry) is saved for SiteConfiguration
        assert len(site_configuration_history) == 1
    def test_site_configuration_post_update_receiver(self):
        """
        Test that an entry is added to SiteConfigurationHistory each time a
        SiteConfiguration is updated.
        """
        # add SiteConfiguration to database
        site_configuration = SiteConfigurationFactory.create(
            site=self.site,
        )
        site_configuration.site_values = {'test': 'test'}
        site_configuration.save()
        # Verify an entry to SiteConfigurationHistory was added.
        site_configuration_history = SiteConfigurationHistory.objects.filter(
            site=site_configuration.site,
        ).all()
        # Make sure two entries (one for create and one for update) are saved for SiteConfiguration
        assert len(site_configuration_history) == 2
    def test_site_configuration_post_update_receiver_with_skip(self):
        """
        Test that an entry is NOT added to SiteConfigurationHistory each time a
        SiteConfiguration is updated with save_siteconfig_without_historical_record().
        """
        # Add SiteConfiguration to database. By default, the site_values field contains only "{}".
        site_configuration = SiteConfigurationFactory.create(
            site=self.site,
        )
        # Update the SiteConfiguration we just created.
        site_configuration.site_values = {"test": "test"}
        save_siteconfig_without_historical_record(site_configuration)  # Instead of .save().
        # Verify that the SiteConfiguration has been updated.
        assert site_configuration.get_value('test') == 'test'
        # Verify an entry to SiteConfigurationHistory was NOT added.
        # Make sure one entry (one for create and NONE for update) is saved for SiteConfiguration.
        site_configuration_history = SiteConfigurationHistory.objects.filter(
            site=site_configuration.site,
        ).all()
        assert len(site_configuration_history) == 1
    def test_no_entry_is_saved_for_errors(self):
        """
        Test that an entry is not added to SiteConfigurationHistory if there is
        an error while saving SiteConfiguration.
        """
        # add SiteConfiguration to database
        site_configuration = SiteConfigurationFactory.create(
            site=self.site,
        )
        # Verify an entry to SiteConfigurationHistory was added.
        site_configuration_history = SiteConfigurationHistory.objects.filter(
            site=site_configuration.site,
        ).all()
        # Make sure entry is saved if there is no error
        assert len(site_configuration_history) == 1
        with transaction.atomic():
            with pytest.raises(IntegrityError):
                # try to add a duplicate entry
                site_configuration = SiteConfigurationFactory.create(
                    site=self.site,
                )
        site_configuration_history = SiteConfigurationHistory.objects.filter(
            site=site_configuration.site,
        ).all()
        # Make sure no entry is saved if there is an error
        assert len(site_configuration_history) == 1
    def test_get_value(self):
        """
        Test that get_value returns correct value for any given key.
        """
        # add SiteConfiguration to database
        site_configuration = SiteConfigurationFactory.create(
            site=self.site,
            site_values=self.test_config1
        )
        # Make sure entry is saved and retrieved correctly
        assert site_configuration.get_value('university') == self.test_config1['university']
        assert site_configuration.get_value('platform_name') == self.test_config1['platform_name']
        assert site_configuration.get_value('SITE_NAME') == self.test_config1['SITE_NAME']
        assert site_configuration.get_value('course_org_filter') == self.test_config1['course_org_filter']
        assert site_configuration.get_value('css_overrides_file') == self.test_config1['css_overrides_file']
        assert site_configuration.get_value('ENABLE_MKTG_SITE') == self.test_config1['ENABLE_MKTG_SITE']
        assert site_configuration.get_value('favicon_path') == self.test_config1['favicon_path']
        assert site_configuration.get_value('ENABLE_THIRD_PARTY_AUTH') == self.test_config1['ENABLE_THIRD_PARTY_AUTH']
        assert site_configuration.get_value('course_about_show_social_links') == \
            self.test_config1['course_about_show_social_links']
        # Test that the default value is returned if the value for the given key is not found in the configuration
        assert site_configuration.get_value('non_existent_name', 'dummy-default-value') == 'dummy-default-value'
        # Test that the default value is returned if Site configuration is not enabled
        site_configuration.enabled = False
        site_configuration.save()
        assert site_configuration.get_value('university') is None
        assert site_configuration.get_value('platform_name', 'Default Platform Name') == 'Default Platform Name'
        assert site_configuration.get_value('SITE_NAME', 'Default Site Name') == 'Default Site Name'
    def test_invalid_data_error_on_get_value(self):
        """
        Test that get_value logs an error if json data is not valid.
        """
        # import logger, for patching
        from openedx.core.djangoapps.site_configuration.models import logger
        # A list is not a valid top-level structure for site_values (a dict is expected).
        invalid_data = [self.test_config1]
        # add SiteConfiguration to database
        site_configuration = SiteConfigurationFactory.create(
            site=self.site,
            site_values=invalid_data
        )
        # make sure get_value logs an error for invalid json data
        with patch.object(logger, "exception") as mock_logger:
            assert site_configuration.get_value('university') is None
            assert mock_logger.called
        # make sure get_value returns default_value for invalid json data
        with patch.object(logger, "exception") as mock_logger:
            value = site_configuration.get_value("platform_name", "Default Platform Name")
            assert mock_logger.called
            assert value == 'Default Platform Name'
    def test_get_value_for_org(self):
        """
        Test that get_value_for_org returns correct value for any given key.
        """
        # add SiteConfiguration to database
        SiteConfigurationFactory.create(
            site=self.site,
            site_values=self.test_config1
        )
        SiteConfigurationFactory.create(
            site=self.site2,
            site_values=self.test_config2
        )
        # Make sure entry is saved and retrieved correctly
        assert SiteConfiguration.get_value_for_org(self.test_config1['course_org_filter'], 'university') ==\
            self.test_config1['university']
        assert SiteConfiguration.get_value_for_org(self.test_config1['course_org_filter'], 'platform_name') ==\
            self.test_config1['platform_name']
        assert SiteConfiguration.get_value_for_org(self.test_config1['course_org_filter'], 'SITE_NAME') ==\
            self.test_config1['SITE_NAME']
        assert SiteConfiguration.get_value_for_org(self.test_config1['course_org_filter'], 'css_overrides_file') ==\
            self.test_config1['css_overrides_file']
        assert SiteConfiguration.get_value_for_org(self.test_config1['course_org_filter'], 'ENABLE_MKTG_SITE') ==\
            self.test_config1['ENABLE_MKTG_SITE']
        # Make sure entry is saved and retrieved correctly
        assert SiteConfiguration.get_value_for_org(self.test_config2['course_org_filter'], 'university') ==\
            self.test_config2['university']
        assert SiteConfiguration.get_value_for_org(self.test_config2['course_org_filter'], 'platform_name') ==\
            self.test_config2['platform_name']
        assert SiteConfiguration\
            .get_value_for_org(self.test_config2['course_org_filter'], 'SITE_NAME') == \
            self.test_config2['SITE_NAME']
        assert SiteConfiguration\
            .get_value_for_org(self.test_config2['course_org_filter'],
                               'css_overrides_file') == self.test_config2['css_overrides_file']
        assert SiteConfiguration\
            .get_value_for_org(self.test_config2['course_org_filter'],
                               'ENABLE_MKTG_SITE') == self.test_config2['ENABLE_MKTG_SITE']
        # Test that the default value is returned if the value for the given key is not found in the configuration
        assert SiteConfiguration\
            .get_value_for_org(self.test_config1['course_org_filter'],
                               'non-existent', 'dummy-default-value') == 'dummy-default-value'
        # Test that the default value is returned if the value for the given key is not found in the configuration
        assert SiteConfiguration\
            .get_value_for_org(self.test_config2['course_org_filter'],
                               'non-existent', 'dummy-default-value') == 'dummy-default-value'
        # Test that the default value is returned if org is not found in the configuration
        assert SiteConfiguration.get_value_for_org('non-existent-org', 'platform_name', 'dummy-default-value') ==\
            'dummy-default-value'
    def test_get_site_for_org(self):
        """
        Test that get_configuration_for_org returns the correct configuration
        for any given org (and None for an unknown org).
        """
        # add SiteConfiguration to database
        config1 = SiteConfigurationFactory.create(
            site=self.site,
            site_values=self.test_config1
        )
        config2 = SiteConfigurationFactory.create(
            site=self.site2,
            site_values=self.test_config2
        )
        # Make sure entry is saved and retrieved correctly
        assert SiteConfiguration.get_configuration_for_org(self.test_config1['course_org_filter']) == config1
        assert SiteConfiguration.get_configuration_for_org(self.test_config2['course_org_filter']) == config2
        assert SiteConfiguration.get_configuration_for_org('something else') is None
    def test_get_all_orgs(self):
        """
        Test that get_all_orgs returns all orgs from site configuration.
        """
        expected_orgs = [self.test_config1['course_org_filter'], self.test_config2['course_org_filter']]
        # add SiteConfiguration to database
        SiteConfigurationFactory.create(
            site=self.site,
            site_values=self.test_config1
        )
        SiteConfigurationFactory.create(
            site=self.site2,
            site_values=self.test_config2
        )
        # Verify that all configured orgs are returned.
        self.assertCountEqual(SiteConfiguration.get_all_orgs(), expected_orgs)
    def test_get_all_orgs_returns_only_enabled(self):
        """
        Test that get_all_orgs returns only those orgs whose configurations are enabled.
        """
        expected_orgs = [self.test_config2['course_org_filter']]
        # add SiteConfiguration to database
        SiteConfigurationFactory.create(
            site=self.site,
            site_values=self.test_config1,
            enabled=False,
        )
        SiteConfigurationFactory.create(
            site=self.site2,
            site_values=self.test_config2
        )
        # Verify that only the org whose configuration is enabled is returned.
        self.assertCountEqual(SiteConfiguration.get_all_orgs(), expected_orgs)
| agpl-3.0 |
mathemage/h2o-3 | h2o-py/tests/testdir_hdfs/pyunit_INTERNAL_HDFS_timestamp_date_orc.py | 4 | 2826 | from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
import time
from tests import pyunit_utils
#----------------------------------------------------------------------
# This test will parse orc files containing timestamp and date information into
# H2O frame. Next, it will take the .csv file generated from the orc file from
# Hive and parse into H2O frame. Finally, we compare the two frames and make sure
# that they are equal.
#
# We want to make sure that we are parsing the date and timestamp
# date correctly from an orc file. Thanks to Nidhi who has imported an orc file
# containing timestamp/date into spark and later into Hive and write it out as
# csv.
#
#----------------------------------------------------------------------
def hdfs_orc_parser():
    """
    Parse ORC files containing timestamp and date columns into H2O frames and
    compare them against H2O frames parsed from the matching .csv exports of
    the same data (generated from the ORC files via Hive). The two frames for
    each file pair must be equal.

    Raises:
        EnvironmentError: if the HDFS namenode is not accessible, i.e. the
            test is not running inside the H2O network.
    """
    # Check if we are running inside the H2O network by seeing if we can touch
    # the namenode; fail loudly (with a message) if we cannot.
    if not pyunit_utils.hadoop_namenode_is_accessible():
        raise EnvironmentError("Hadoop namenode is not accessible. This test must be run inside the H2O network.")

    hdfs_name_node = pyunit_utils.hadoop_namenode()
    if pyunit_utils.cannaryHDFSTest(hdfs_name_node, "/datasets/orc_parser/orc/orc_split_elim.orc"):
        # Old hive-exec versions cannot produce these files correctly; skip
        # the test instead of failing.
        print("Your hive-exec version is too old. Orc parser test {0} is "
              "skipped.".format("pyunit_INTERNAL_HDFS_timestamp_date_orc.py"))
        return

    tol_time = 200                    # tolerance when comparing time values (ms or ns)
    tol_numeric = 1e-5                # tolerance when comparing other numeric fields
    num_elements_to_compare = 100     # elements per column to compare; keeps test time down

    orc_files = ["/datasets/orc_parser/orc/TestOrcFile.testDate1900.orc",
                 "/datasets/orc_parser/orc/TestOrcFile.testDate2038.orc",
                 "/datasets/orc_parser/orc/orc_split_elim.orc"]
    csv_files = ["/datasets/orc_parser/csv/TestOrcFile.testDate1900.csv",
                 "/datasets/orc_parser/csv/TestOrcFile.testDate2038.csv",
                 "/datasets/orc_parser/csv/orc_split_elim.csv"]

    for orc_path, csv_path in zip(orc_files, csv_files):
        url_orc = "hdfs://{0}{1}".format(hdfs_name_node, orc_path)
        url_csv = "hdfs://{0}{1}".format(hdfs_name_node, csv_path)
        frame_orc = h2o.import_file(url_orc)
        frame_csv = h2o.import_file(url_csv)

        # compare the two frames
        assert pyunit_utils.compare_frames(frame_orc, frame_csv, num_elements_to_compare, tol_time, tol_numeric), \
            "H2O frame parsed from orc and csv files are different!"
if __name__ == "__main__":
pyunit_utils.standalone_test(hdfs_orc_parser)
else:
hdfs_orc_parser() | apache-2.0 |
amenonsen/ansible | lib/ansible/modules/network/cloudengine/ce_ntp.py | 7 | 20789 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_ntp
version_added: "2.4"
short_description: Manages core NTP configuration on HUAWEI CloudEngine switches.
description:
- Manages core NTP configuration on HUAWEI CloudEngine switches.
author:
- Zhijin Zhou (@QijunPan)
options:
server:
description:
- Network address of NTP server.
peer:
description:
- Network address of NTP peer.
key_id:
description:
- Authentication key identifier to use with given NTP server or peer.
is_preferred:
description:
- Makes given NTP server or peer the preferred NTP server or peer for the device.
choices: ['enable', 'disable']
vpn_name:
description:
- Makes the device communicate with the given
NTP server or peer over a specific vpn.
default: '_public_'
source_int:
description:
- Local source interface from which NTP messages are sent.
Must be fully qualified interface name, i.e. C(40GE1/0/22), C(vlanif10).
Interface types, such as C(10GE), C(40GE), C(100GE), C(Eth-Trunk), C(LoopBack),
C(MEth), C(NULL), C(Tunnel), C(Vlanif).
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: NTP test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Set NTP Server with parameters"
ce_ntp:
server: 192.8.2.6
vpn_name: js
source_int: vlanif4001
is_preferred: enable
key_id: 32
provider: "{{ cli }}"
- name: "Set NTP Peer with parameters"
ce_ntp:
peer: 192.8.2.6
vpn_name: js
source_int: vlanif4001
is_preferred: enable
key_id: 32
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"server": "2.2.2.2", "key_id": "48",
"is_preferred": "enable", "vpn_name":"js",
"source_int": "vlanif4002", "state":"present"}
existing:
description: k/v pairs of existing ntp server/peer
returned: always
type: dict
sample: {"server": "2.2.2.2", "key_id": "32",
"is_preferred": "disable", "vpn_name":"js",
"source_int": "vlanif4002"}
end_state:
description: k/v pairs of ntp info after module execution
returned: always
type: dict
sample: {"server": "2.2.2.2", "key_id": "48",
"is_preferred": "enable", "vpn_name":"js",
"source_int": "vlanif4002"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["ntp server 2.2.2.2 authentication-keyid 48 source-interface vlanif4002 vpn-instance js preferred"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec, get_nc_config, set_nc_config
CE_NC_GET_NTP_CONFIG = """
<filter type="subtree">
<ntp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ntpUCastCfgs>
<ntpUCastCfg>
<addrFamily></addrFamily>
<vpnName></vpnName>
<ifName></ifName>
<ipv4Addr></ipv4Addr>
<ipv6Addr></ipv6Addr>
<type></type>
<isPreferred></isPreferred>
<keyId></keyId>
</ntpUCastCfg>
</ntpUCastCfgs>
</ntp>
</filter>
"""
CE_NC_MERGE_NTP_CONFIG = """
<config>
<ntp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ntpUCastCfgs>
<ntpUCastCfg operation="merge">
<addrFamily>%s</addrFamily>
<ipv4Addr>%s</ipv4Addr>
<ipv6Addr>%s</ipv6Addr>
<type>%s</type>
<vpnName>%s</vpnName>
<keyId>%s</keyId>
<isPreferred>%s</isPreferred>
<ifName>%s</ifName>
<neid>0-0</neid>
</ntpUCastCfg>
</ntpUCastCfgs>
</ntp>
</config>
"""
CE_NC_DELETE_NTP_CONFIG = """
<config>
<ntp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ntpUCastCfgs>
<ntpUCastCfg operation="delete">
<addrFamily>%s</addrFamily>
<ipv4Addr>%s</ipv4Addr>
<ipv6Addr>%s</ipv6Addr>
<type>%s</type>
<vpnName>%s</vpnName>
<neid>0-0</neid>
</ntpUCastCfg>
</ntpUCastCfgs>
</ntp>
</config>
"""
def get_interface_type(interface):
    """Return the lowercase interface type (e.g. '10ge', 'eth-trunk', 'vlanif')
    for an interface name, or None when the name is unrecognized or None."""
    if interface is None:
        return None

    # Ordered (prefix, type) pairs; matching is case-insensitive and uses the
    # first prefix that the upper-cased interface name starts with.
    _PREFIX_TO_TYPE = (
        ('GE', 'ge'),
        ('10GE', '10ge'),
        ('25GE', '25ge'),
        ('4X10GE', '4x10ge'),
        ('40GE', '40ge'),
        ('100GE', '100ge'),
        ('VLANIF', 'vlanif'),
        ('LOOPBACK', 'loopback'),
        ('METH', 'meth'),
        ('ETH-TRUNK', 'eth-trunk'),
        ('VBDIF', 'vbdif'),
        ('NVE', 'nve'),
        ('TUNNEL', 'tunnel'),
        ('ETHERNET', 'ethernet'),
        ('FCOE-PORT', 'fcoe-port'),
        ('FABRIC-PORT', 'fabric-port'),
        ('STACK-PORT', 'stack-port'),
        ('NULL', 'null'),
    )

    name = interface.upper()
    for prefix, iftype in _PREFIX_TO_TYPE:
        if name.startswith(prefix):
            return iftype
    return None
class Ntp(object):
"""Ntp class"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.mutually_exclusive = [('server', 'peer')]
self.init_module()
# ntp configration info
self.server = self.module.params['server'] or None
self.peer = self.module.params['peer'] or None
self.key_id = self.module.params['key_id']
self.is_preferred = self.module.params['is_preferred']
self.vpn_name = self.module.params['vpn_name']
self.interface = self.module.params['source_int'] or ""
self.state = self.module.params['state']
self.ntp_conf = dict()
self.conf_exsit = False
self.ip_ver = 'IPv4'
if self.server:
self.peer_type = 'Server'
self.address = self.server
elif self.peer:
self.peer_type = 'Peer'
self.address = self.peer
else:
self.peer_type = None
self.address = None
self.check_params()
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = list()
self.end_state = list()
self.init_data()
def init_data(self):
"""Init data"""
if self.interface is not None:
self.interface = self.interface.lower()
if not self.key_id:
self.key_id = ""
if not self.is_preferred:
self.is_preferred = 'disable'
def init_module(self):
"""Init module"""
required_one_of = [("server", "peer")]
self.module = AnsibleModule(
argument_spec=self.spec,
supports_check_mode=True,
required_one_of=required_one_of,
mutually_exclusive=self.mutually_exclusive
)
    def check_ipaddr_validate(self):
        """Validate self.address and set self.ip_ver to 'IPv4' or 'IPv6'.

        Fails the module (fail_json) only when an IPv4-shaped address is
        rejected by the unicast check in ntp_ucast_ipv4_validate().
        """
        # Dotted-quad octet patterns: each octet 0-255.
        rule1 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.'
        rule2 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])'
        ipv4_regex = '%s%s%s%s%s%s' % ('^', rule1, rule1, rule1, rule2, '$')
        # Matches only the full, uncompressed 8-group IPv6 form.
        ipv6_regex = '^(?:[a-fA-F0-9]{1,4}:){7}[a-fA-F0-9]{1,4}$'
        flag = False
        if bool(re.match(ipv4_regex, self.address)):
            flag = True
            self.ip_ver = "IPv4"
            if not self.ntp_ucast_ipv4_validate():
                flag = False
        elif bool(re.match(ipv6_regex, self.address)):
            flag = True
            self.ip_ver = "IPv6"
        else:
            # NOTE(review): any string matching neither regex is accepted here
            # and treated as IPv6 (this covers compressed forms like "::1",
            # but also arbitrary text) -- confirm this fall-through is
            # intentional.
            flag = True
            self.ip_ver = "IPv6"
        if not flag:
            # Only reachable for IPv4-shaped addresses that failed the
            # unicast check above.
            if self.peer_type == "Server":
                self.module.fail_json(msg='Error: Illegal server ip-address.')
            else:
                self.module.fail_json(msg='Error: Illegal peer ip-address.')
    def ntp_ucast_ipv4_validate(self):
        """Return True if self.address is a usable IPv4 unicast address.

        Fails the module outright when the address does not split into four
        dot-separated fields.  Returns False for loopback (127.0.0.0/8),
        reserved (240.0.0.0/4), multicast (224.0.0.0/4) and the all-zero
        address.
        """
        addr_list = re.findall(r'(.*)\.(.*)\.(.*)\.(.*)', self.address)
        if not addr_list:
            self.module.fail_json(msg='Error: Match ip-address fail.')
        # Pack the four octets into a single 32-bit integer value.
        value = ((int(addr_list[0][0])) * 0x1000000) + (int(addr_list[0][1]) * 0x10000) + \
            (int(addr_list[0][2]) * 0x100) + (int(addr_list[0][3]))
        # Reject 127/8 (loopback), 240/4 (reserved), 224/4 (multicast), 0.0.0.0.
        if (value & (0xff000000) == 0x7f000000) or (value & (0xF0000000) == 0xF0000000) \
                or (value & (0xF0000000) == 0xE0000000) or (value == 0):
            return False
        return True
def check_params(self):
"""Check all input params"""
# check interface type
if self.interface:
intf_type = get_interface_type(self.interface)
if not intf_type:
self.module.fail_json(
msg='Error: Interface name of %s '
'is error.' % self.interface)
if self.vpn_name:
if (len(self.vpn_name) < 1) or (len(self.vpn_name) > 31):
self.module.fail_json(
msg='Error: VPN name length is beetween 1 and 31.')
if self.address:
self.check_ipaddr_validate()
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed."""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def set_ntp(self, *args):
"""Configure ntp parameters"""
if self.state == 'present':
if self.ip_ver == 'IPv4':
xml_str = CE_NC_MERGE_NTP_CONFIG % (
args[0], args[1], '::', args[2], args[3], args[4], args[5], args[6])
elif self.ip_ver == 'IPv6':
xml_str = CE_NC_MERGE_NTP_CONFIG % (
args[0], '0.0.0.0', args[1], args[2], args[3], args[4], args[5], args[6])
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "NTP_CORE_CONFIG")
else:
if self.ip_ver == 'IPv4':
xml_str = CE_NC_DELETE_NTP_CONFIG % (
args[0], args[1], '::', args[2], args[3])
elif self.ip_ver == 'IPv6':
xml_str = CE_NC_DELETE_NTP_CONFIG % (
args[0], '0.0.0.0', args[1], args[2], args[3])
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "UNDO_NTP_CORE_CONFIG")
def config_ntp(self):
"""Config ntp"""
if self.state == "present":
if self.address and not self.conf_exsit:
if self.is_preferred == 'enable':
is_preferred = 'true'
else:
is_preferred = 'false'
self.set_ntp(self.ip_ver, self.address, self.peer_type,
self.vpn_name, self.key_id, is_preferred, self.interface)
self.changed = True
else:
if self.address:
self.set_ntp(self.ip_ver, self.address,
self.peer_type, self.vpn_name, '', '', '')
self.changed = True
def show_result(self):
"""Show result"""
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
    def get_ntp_exist_config(self):
        """Return the unicast NTP entries currently configured on the device.

        Each entry is a dict keyed by vpn_name/source_int/server-or-peer/
        is_preferred/key_id.  As a side effect, when state is 'present' and
        an entry matching the requested parameters is found,
        ``self.conf_exsit`` is set to True.
        """
        ntp_config = list()
        conf_str = CE_NC_GET_NTP_CONFIG
        con_obj = get_nc_config(self.module, conf_str)
        if "<data/>" in con_obj:
            # Empty <data/> reply: no NTP configuration present.
            return ntp_config
        # Strip line breaks and namespace declarations so that plain tag
        # names can be used with ElementTree.findall() below.
        xml_str = con_obj.replace('\r', '').replace('\n', '').\
            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
        # get all ntp config info
        root = ElementTree.fromstring(xml_str)
        ntpsite = root.findall("ntp/ntpUCastCfgs/ntpUCastCfg")
        for nexthop in ntpsite:
            ntp_dict = dict()
            for ele in nexthop:
                if ele.tag in ["addrFamily", "vpnName", "ifName", "ipv4Addr",
                               "ipv6Addr", "type", "isPreferred", "keyId"]:
                    ntp_dict[ele.tag] = ele.text
            # Pick the address belonging to the entry's address family.
            ip_addr = ntp_dict['ipv6Addr']
            if ntp_dict['addrFamily'] == "IPv4":
                ip_addr = ntp_dict['ipv4Addr']
            if ntp_dict['ifName'] is None:
                ntp_dict['ifName'] = ""
            if ntp_dict['isPreferred'] == 'true':
                is_preferred = 'enable'
            else:
                is_preferred = 'disable'
            if self.state == "present":
                # Compare this device entry with the requested one; a full
                # match means there is nothing left to configure.
                # NOTE(review): assumes self.interface is a string here
                # (never None) -- confirm check_params guarantees that.
                key_id = ntp_dict['keyId'] or ""
                cur_ntp_cfg = dict(vpn_name=ntp_dict['vpnName'], source_int=ntp_dict['ifName'].lower(), address=ip_addr,
                                   peer_type=ntp_dict['type'], prefer=is_preferred, key_id=key_id)
                exp_ntp_cfg = dict(vpn_name=self.vpn_name, source_int=self.interface.lower(), address=self.address,
                                   peer_type=self.peer_type, prefer=self.is_preferred, key_id=self.key_id)
                if cur_ntp_cfg == exp_ntp_cfg:
                    self.conf_exsit = True
            # Normalize the public VPN instance and empty interface to None
            # for the reported facts.
            vpn_name = ntp_dict['vpnName']
            if ntp_dict['vpnName'] == "_public_":
                vpn_name = None
            if_name = ntp_dict['ifName']
            if if_name == "":
                if_name = None
            if self.peer_type == 'Server':
                ntp_config.append(dict(vpn_name=vpn_name,
                                       source_int=if_name, server=ip_addr,
                                       is_preferred=is_preferred, key_id=ntp_dict['keyId']))
            else:
                ntp_config.append(dict(vpn_name=vpn_name,
                                       source_int=if_name, peer=ip_addr,
                                       is_preferred=is_preferred, key_id=ntp_dict['keyId']))
        return ntp_config
def get_existing(self):
"""Get existing info"""
if self.address:
self.existing = self.get_ntp_exist_config()
def get_proposed(self):
"""Get proposed info"""
if self.address:
vpn_name = self.vpn_name
if vpn_name == "_public_":
vpn_name = None
if_name = self.interface
if if_name == "":
if_name = None
key_id = self.key_id
if key_id == "":
key_id = None
if self.peer_type == 'Server':
self.proposed = dict(state=self.state, vpn_name=vpn_name,
source_int=if_name, server=self.address,
is_preferred=self.is_preferred, key_id=key_id)
else:
self.proposed = dict(state=self.state, vpn_name=vpn_name,
source_int=if_name, peer=self.address,
is_preferred=self.is_preferred, key_id=key_id)
def get_end_state(self):
"""Get end state info"""
if self.address:
self.end_state = self.get_ntp_exist_config()
    def get_update_cmd(self):
        """Record the CLI command equivalent to the NETCONF change.

        Builds the 'ntp unicast-server/-peer ...' (or its 'undo' form)
        command line and appends it to ``self.updates_cmd``.  Does nothing
        when the requested configuration already exists.
        """
        if self.conf_exsit:
            return
        cli_str = ""
        if self.state == "present":
            # Base command: family- and role-specific keyword plus address.
            if self.address:
                if self.peer_type == 'Server':
                    if self.ip_ver == "IPv4":
                        cli_str = "%s %s" % (
                            "ntp unicast-server", self.address)
                    else:
                        cli_str = "%s %s" % (
                            "ntp unicast-server ipv6", self.address)
                elif self.peer_type == 'Peer':
                    if self.ip_ver == "IPv4":
                        cli_str = "%s %s" % ("ntp unicast-peer", self.address)
                    else:
                        cli_str = "%s %s" % (
                            "ntp unicast-peer ipv6", self.address)
                # Optional suffixes, in fixed CLI order.
                if self.key_id:
                    cli_str = "%s %s %s" % (
                        cli_str, "authentication-keyid", self.key_id)
                if self.interface:
                    cli_str = "%s %s %s" % (
                        cli_str, "source-interface", self.interface)
                if (self.vpn_name) and (self.vpn_name != '_public_'):
                    cli_str = "%s %s %s" % (
                        cli_str, "vpn-instance", self.vpn_name)
                if self.is_preferred == "enable":
                    cli_str = "%s %s" % (cli_str, "preferred")
        else:
            # 'undo' form only needs the base command plus VPN instance.
            if self.address:
                if self.peer_type == 'Server':
                    if self.ip_ver == "IPv4":
                        cli_str = "%s %s" % (
                            "undo ntp unicast-server", self.address)
                    else:
                        cli_str = "%s %s" % (
                            "undo ntp unicast-server ipv6", self.address)
                elif self.peer_type == 'Peer':
                    if self.ip_ver == "IPv4":
                        cli_str = "%s %s" % (
                            "undo ntp unicast-peer", self.address)
                    else:
                        cli_str = "%s %s" % (
                            "undo ntp unicast-peer ipv6", self.address)
                if (self.vpn_name) and (self.vpn_name != '_public_'):
                    cli_str = "%s %s" % (cli_str, self.vpn_name)
        # NOTE(review): when no address was supplied an empty string is
        # still appended to updates_cmd -- possibly unintended; confirm.
        self.updates_cmd.append(cli_str)
def work(self):
"""Excute task"""
self.get_existing()
self.get_proposed()
self.config_ntp()
self.get_update_cmd()
self.get_end_state()
self.show_result()
def main():
    """Module entry point: build the argument spec and run the NTP task."""
    argument_spec = dict(
        server=dict(type='str'),
        peer=dict(type='str'),
        key_id=dict(type='str'),
        is_preferred=dict(type='str', choices=['enable', 'disable']),
        vpn_name=dict(type='str', default='_public_'),
        source_int=dict(type='str'),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    # Add the standard CloudEngine connection arguments shared by all
    # ce_* modules.
    argument_spec.update(ce_argument_spec)
    ntp_obj = Ntp(argument_spec)
    ntp_obj.work()
# Standard entry point when the module is executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
davehunt/kuma | vendor/packages/pyflakes/checker.py | 19 | 33324 | """
Main module.
Implement the central Checker class.
Also, it models the Bindings and Scopes.
"""
import doctest
import os
import sys
PY2 = sys.version_info < (3, 0)
PY32 = sys.version_info < (3, 3) # Python 2.5 to 3.2
PY33 = sys.version_info < (3, 4) # Python 2.5 to 3.3
builtin_vars = dir(__import__('__builtin__' if PY2 else 'builtins'))
try:
import ast
except ImportError: # Python 2.5
import _ast as ast
if 'decorator_list' not in ast.ClassDef._fields:
# Patch the missing attribute 'decorator_list'
ast.ClassDef.decorator_list = ()
ast.FunctionDef.decorator_list = property(lambda s: s.decorators)
from pyflakes import messages
if PY2:
def getNodeType(node_class):
# workaround str.upper() which is locale-dependent
return str(unicode(node_class.__name__).upper())
else:
def getNodeType(node_class):
return node_class.__name__.upper()
# Python >= 3.3 uses ast.Try instead of (ast.TryExcept + ast.TryFinally)
if PY32:
def getAlternatives(n):
if isinstance(n, (ast.If, ast.TryFinally)):
return [n.body]
if isinstance(n, ast.TryExcept):
return [n.body + n.orelse] + [[hdl] for hdl in n.handlers]
else:
def getAlternatives(n):
if isinstance(n, ast.If):
return [n.body]
if isinstance(n, ast.Try):
return [n.body + n.orelse] + [[hdl] for hdl in n.handlers]
class _FieldsOrder(dict):
"""Fix order of AST node fields."""
def _get_fields(self, node_class):
# handle iter before target, and generators before element
fields = node_class._fields
if 'iter' in fields:
key_first = 'iter'.find
elif 'generators' in fields:
key_first = 'generators'.find
else:
key_first = 'value'.find
return tuple(sorted(fields, key=key_first, reverse=True))
def __missing__(self, node_class):
self[node_class] = fields = self._get_fields(node_class)
return fields
def iter_child_nodes(node, omit=None, _fields_order=_FieldsOrder()):
    """
    Yield every direct child of *node*: fields that are themselves AST
    nodes, plus each item of fields that are lists of nodes.

    *omit* names a single field to skip entirely.
    """
    for field_name in _fields_order[node.__class__]:
        if field_name == omit:
            continue
        value = getattr(node, field_name, None)
        if value is None:
            continue
        if isinstance(value, ast.AST):
            yield value
        elif isinstance(value, list):
            for child in value:
                yield child
class Binding(object):
    """
    The association of a value with a name.

    The checker uses this to track which names have been bound and which
    have not; see L{Assignment} for a binding type that is checked with
    stricter rules.

    @ivar used: C{False} until the binding is referenced, then a pair of
        (L{Scope}, node) recording where it was last used.
    """

    def __init__(self, name, source):
        self.name = name
        self.source = source
        self.used = False

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<%s object %r from line %r at 0x%x>' % (
            self.__class__.__name__, self.name, self.source.lineno, id(self))

    def redefines(self, other):
        # A plain binding shadows any earlier *definition* of the same name.
        return isinstance(other, Definition) and self.name == other.name
class Definition(Binding):
"""
A binding that defines a function or a class.
"""
class Importation(Definition):
    """
    A binding created by an import statement.

    @ivar fullName: The complete name given to the import statement,
        possibly including multiple dotted components.
    @type fullName: C{str}
    """
    def __init__(self, name, source):
        # Keep the full dotted path for redefinition comparisons, but bind
        # only the first component ('import os.path' binds the name 'os').
        self.fullName = name
        self.redefined = []
        name = name.split('.')[0]
        super(Importation, self).__init__(name, source)
    def redefines(self, other):
        # Two imports of the same dotted path are true redefinitions;
        # otherwise fall back to plain definition-name comparison.
        if isinstance(other, Importation):
            return self.fullName == other.fullName
        return isinstance(other, Definition) and self.name == other.name
class Argument(Binding):
"""
Represents binding a name as an argument.
"""
class Assignment(Binding):
"""
Represents binding a name with an explicit assignment.
The checker will raise warnings for any Assignment that isn't used. Also,
the checker does not consider assignments in tuple/list unpacking to be
Assignments, rather it treats them as simple Bindings.
"""
class FunctionDefinition(Definition):
pass
class ClassDefinition(Definition):
pass
class ExportBinding(Binding):
    """
    A binding created by an C{__all__} assignment.

    If the names in the list can be determined statically, they are treated
    as export names and additional checking is applied to them.  Only a
    literal list or tuple of literal strings is recognized, for example::

        __all__ = ["foo", "bar"]

    Imported names that are otherwise unused but appear in the value of
    C{__all__} do not trigger an unused-import warning.
    """
    def __init__(self, name, source, scope):
        # An augmented assignment ('__all__ += [...]') extends any export
        # list already recorded in this scope.
        if '__all__' in scope and isinstance(source, ast.AugAssign):
            self.names = list(scope['__all__'].names)
        else:
            self.names = []
        # Collect only statically-known string literals.
        if isinstance(source.value, (ast.List, ast.Tuple)):
            for node in source.value.elts:
                if isinstance(node, ast.Str):
                    self.names.append(node.s)
        super(ExportBinding, self).__init__(name, source)
class Scope(dict):
importStarred = False # set to True when import * is found
def __repr__(self):
scope_cls = self.__class__.__name__
return '<%s at 0x%x %s>' % (scope_cls, id(self), dict.__repr__(self))
class ClassScope(Scope):
pass
class FunctionScope(Scope):
    """
    A name scope for a function.

    @ivar globals: Names declared 'global' in this function.
    """
    usesLocals = False
    alwaysUsed = set(['__tracebackhide__',
                      '__traceback_info__', '__traceback_supplement__'])

    def __init__(self):
        super(FunctionScope, self).__init__()
        # Treat the traceback magic names as globals so they are never
        # reported as unused assignments.
        self.globals = self.alwaysUsed.copy()
        self.returnValue = None     # first non-empty return expression
        self.isGenerator = False    # becomes True once a yield is seen

    def unusedAssignments(self):
        """
        Yield (name, binding) pairs for assignments that were never read.
        """
        if self.usesLocals:
            # locals() was called in this scope, so any assignment may
            # have been observed; report nothing.
            return
        for name, binding in self.items():
            if (isinstance(binding, Assignment)
                    and not binding.used
                    and name not in self.globals):
                yield name, binding
class GeneratorScope(Scope):
pass
class ModuleScope(Scope):
pass
# Globally defined names which are not attributes of the builtins module, or
# are only present on some platforms.
_MAGIC_GLOBALS = ['__file__', '__builtins__', 'WindowsError']
def getNodeName(node):
    """Return ``node.id`` or ``node.name``, whichever exists, else None."""
    for attr in ('id', 'name'):
        if hasattr(node, attr):
            return getattr(node, attr)
    return None
class Checker(object):
"""
I check the cleanliness and sanity of Python code.
@ivar _deferredFunctions: Tracking list used by L{deferFunction}. Elements
of the list are two-tuples. The first element is the callable passed
to L{deferFunction}. The second element is a copy of the scope stack
at the time L{deferFunction} was called.
@ivar _deferredAssignments: Similar to C{_deferredFunctions}, but for
callables which are deferred assignment checks.
"""
nodeDepth = 0
offset = None
traceTree = False
builtIns = set(builtin_vars).union(_MAGIC_GLOBALS)
_customBuiltIns = os.environ.get('PYFLAKES_BUILTINS')
if _customBuiltIns:
builtIns.update(_customBuiltIns.split(','))
del _customBuiltIns
def __init__(self, tree, filename='(none)', builtins=None,
withDoctest='PYFLAKES_DOCTEST' in os.environ):
self._nodeHandlers = {}
self._deferredFunctions = []
self._deferredAssignments = []
self.deadScopes = []
self.messages = []
self.filename = filename
if builtins:
self.builtIns = self.builtIns.union(builtins)
self.withDoctest = withDoctest
self.scopeStack = [ModuleScope()]
self.exceptHandlers = [()]
self.futuresAllowed = True
self.root = tree
self.handleChildren(tree)
self.runDeferred(self._deferredFunctions)
# Set _deferredFunctions to None so that deferFunction will fail
# noisily if called after we've run through the deferred functions.
self._deferredFunctions = None
self.runDeferred(self._deferredAssignments)
# Set _deferredAssignments to None so that deferAssignment will fail
# noisily if called after we've run through the deferred assignments.
self._deferredAssignments = None
del self.scopeStack[1:]
self.popScope()
self.checkDeadScopes()
def deferFunction(self, callable):
"""
Schedule a function handler to be called just before completion.
This is used for handling function bodies, which must be deferred
because code later in the file might modify the global scope. When
`callable` is called, the scope at the time this is called will be
restored, however it will contain any new bindings added to it.
"""
self._deferredFunctions.append((callable, self.scopeStack[:], self.offset))
def deferAssignment(self, callable):
"""
Schedule an assignment handler to be called just after deferred
function handlers.
"""
self._deferredAssignments.append((callable, self.scopeStack[:], self.offset))
def runDeferred(self, deferred):
"""
Run the callables in C{deferred} using their associated scope stack.
"""
for handler, scope, offset in deferred:
self.scopeStack = scope
self.offset = offset
handler()
@property
def scope(self):
return self.scopeStack[-1]
def popScope(self):
self.deadScopes.append(self.scopeStack.pop())
def checkDeadScopes(self):
"""
Look at scopes which have been fully examined and report names in them
which were imported but unused.
"""
for scope in self.deadScopes:
if isinstance(scope.get('__all__'), ExportBinding):
all_names = set(scope['__all__'].names)
if not scope.importStarred and \
os.path.basename(self.filename) != '__init__.py':
# Look for possible mistakes in the export list
undefined = all_names.difference(scope)
for name in undefined:
self.report(messages.UndefinedExport,
scope['__all__'].source, name)
else:
all_names = []
# Look for imported names that aren't used.
for value in scope.values():
if isinstance(value, Importation):
used = value.used or value.name in all_names
if not used:
messg = messages.UnusedImport
self.report(messg, value.source, value.name)
for node in value.redefined:
if isinstance(self.getParent(node), ast.For):
messg = messages.ImportShadowedByLoopVar
elif used:
continue
else:
messg = messages.RedefinedWhileUnused
self.report(messg, node, value.name, value.source)
def pushScope(self, scopeClass=FunctionScope):
self.scopeStack.append(scopeClass())
def report(self, messageClass, *args, **kwargs):
self.messages.append(messageClass(self.filename, *args, **kwargs))
def getParent(self, node):
# Lookup the first parent which is not Tuple, List or Starred
while True:
node = node.parent
if not hasattr(node, 'elts') and not hasattr(node, 'ctx'):
return node
def getCommonAncestor(self, lnode, rnode, stop):
if stop in (lnode, rnode) or not (hasattr(lnode, 'parent') and
hasattr(rnode, 'parent')):
return None
if lnode is rnode:
return lnode
if (lnode.depth > rnode.depth):
return self.getCommonAncestor(lnode.parent, rnode, stop)
if (lnode.depth < rnode.depth):
return self.getCommonAncestor(lnode, rnode.parent, stop)
return self.getCommonAncestor(lnode.parent, rnode.parent, stop)
def descendantOf(self, node, ancestors, stop):
for a in ancestors:
if self.getCommonAncestor(node, a, stop):
return True
return False
def differentForks(self, lnode, rnode):
"""True, if lnode and rnode are located on different forks of IF/TRY"""
ancestor = self.getCommonAncestor(lnode, rnode, self.root)
parts = getAlternatives(ancestor)
if parts:
for items in parts:
if self.descendantOf(lnode, items, ancestor) ^ \
self.descendantOf(rnode, items, ancestor):
return True
return False
def addBinding(self, node, value):
"""
Called when a binding is altered.
- `node` is the statement responsible for the change
- `value` is the new value, a Binding instance
"""
# assert value.source in (node, node.parent):
for scope in self.scopeStack[::-1]:
if value.name in scope:
break
existing = scope.get(value.name)
if existing and not self.differentForks(node, existing.source):
parent_stmt = self.getParent(value.source)
if isinstance(existing, Importation) and isinstance(parent_stmt, ast.For):
self.report(messages.ImportShadowedByLoopVar,
node, value.name, existing.source)
elif scope is self.scope:
if (isinstance(parent_stmt, ast.comprehension) and
not isinstance(self.getParent(existing.source),
(ast.For, ast.comprehension))):
self.report(messages.RedefinedInListComp,
node, value.name, existing.source)
elif not existing.used and value.redefines(existing):
self.report(messages.RedefinedWhileUnused,
node, value.name, existing.source)
elif isinstance(existing, Importation) and value.redefines(existing):
existing.redefined.append(node)
self.scope[value.name] = value
def getNodeHandler(self, node_class):
try:
return self._nodeHandlers[node_class]
except KeyError:
nodeType = getNodeType(node_class)
self._nodeHandlers[node_class] = handler = getattr(self, nodeType)
return handler
def handleNodeLoad(self, node):
name = getNodeName(node)
if not name:
return
# try local scope
try:
self.scope[name].used = (self.scope, node)
except KeyError:
pass
else:
return
scopes = [scope for scope in self.scopeStack[:-1]
if isinstance(scope, (FunctionScope, ModuleScope, GeneratorScope))]
if isinstance(self.scope, GeneratorScope) and scopes[-1] != self.scopeStack[-2]:
scopes.append(self.scopeStack[-2])
# try enclosing function scopes and global scope
importStarred = self.scope.importStarred
for scope in reversed(scopes):
importStarred = importStarred or scope.importStarred
try:
scope[name].used = (self.scope, node)
except KeyError:
pass
else:
return
# look in the built-ins
if importStarred or name in self.builtIns:
return
if name == '__path__' and os.path.basename(self.filename) == '__init__.py':
# the special name __path__ is valid only in packages
return
# protected with a NameError handler?
if 'NameError' not in self.exceptHandlers[-1]:
self.report(messages.UndefinedName, node, name)
def handleNodeStore(self, node):
name = getNodeName(node)
if not name:
return
# if the name hasn't already been defined in the current scope
if isinstance(self.scope, FunctionScope) and name not in self.scope:
# for each function or module scope above us
for scope in self.scopeStack[:-1]:
if not isinstance(scope, (FunctionScope, ModuleScope)):
continue
# if the name was defined in that scope, and the name has
# been accessed already in the current scope, and hasn't
# been declared global
used = name in scope and scope[name].used
if used and used[0] is self.scope and name not in self.scope.globals:
# then it's probably a mistake
self.report(messages.UndefinedLocal,
scope[name].used[1], name, scope[name].source)
break
parent_stmt = self.getParent(node)
if isinstance(parent_stmt, (ast.For, ast.comprehension)) or (
parent_stmt != node.parent and
not self.isLiteralTupleUnpacking(parent_stmt)):
binding = Binding(name, node)
elif name == '__all__' and isinstance(self.scope, ModuleScope):
binding = ExportBinding(name, node.parent, self.scope)
else:
binding = Assignment(name, node)
if name in self.scope:
binding.used = self.scope[name].used
self.addBinding(node, binding)
def handleNodeDelete(self, node):
def on_conditional_branch():
"""
Return `True` if node is part of a conditional body.
"""
current = getattr(node, 'parent', None)
while current:
if isinstance(current, (ast.If, ast.While, ast.IfExp)):
return True
current = getattr(current, 'parent', None)
return False
name = getNodeName(node)
if not name:
return
if on_conditional_branch():
# We can not predict if this conditional branch is going to
# be executed.
return
if isinstance(self.scope, FunctionScope) and name in self.scope.globals:
self.scope.globals.remove(name)
else:
try:
del self.scope[name]
except KeyError:
self.report(messages.UndefinedName, node, name)
def handleChildren(self, tree, omit=None):
for node in iter_child_nodes(tree, omit=omit):
self.handleNode(node, tree)
def isLiteralTupleUnpacking(self, node):
if isinstance(node, ast.Assign):
for child in node.targets + [node.value]:
if not hasattr(child, 'elts'):
return False
return True
def isDocstring(self, node):
"""
Determine if the given node is a docstring, as long as it is at the
correct place in the node tree.
"""
return isinstance(node, ast.Str) or (isinstance(node, ast.Expr) and
isinstance(node.value, ast.Str))
def getDocstring(self, node):
if isinstance(node, ast.Expr):
node = node.value
if not isinstance(node, ast.Str):
return (None, None)
# Computed incorrectly if the docstring has backslash
doctest_lineno = node.lineno - node.s.count('\n') - 1
return (node.s, doctest_lineno)
def handleNode(self, node, parent):
if node is None:
return
if self.offset and getattr(node, 'lineno', None) is not None:
node.lineno += self.offset[0]
node.col_offset += self.offset[1]
if self.traceTree:
print(' ' * self.nodeDepth + node.__class__.__name__)
if self.futuresAllowed and not (isinstance(node, ast.ImportFrom) or
self.isDocstring(node)):
self.futuresAllowed = False
self.nodeDepth += 1
node.depth = self.nodeDepth
node.parent = parent
try:
handler = self.getNodeHandler(node.__class__)
handler(node)
finally:
self.nodeDepth -= 1
if self.traceTree:
print(' ' * self.nodeDepth + 'end ' + node.__class__.__name__)
_getDoctestExamples = doctest.DocTestParser().get_examples
def handleDoctests(self, node):
try:
(docstring, node_lineno) = self.getDocstring(node.body[0])
examples = docstring and self._getDoctestExamples(docstring)
except (ValueError, IndexError):
# e.g. line 6 of the docstring for <string> has inconsistent
# leading whitespace: ...
return
if not examples:
return
node_offset = self.offset or (0, 0)
self.pushScope()
underscore_in_builtins = '_' in self.builtIns
if not underscore_in_builtins:
self.builtIns.add('_')
for example in examples:
try:
tree = compile(example.source, "<doctest>", "exec", ast.PyCF_ONLY_AST)
except SyntaxError:
e = sys.exc_info()[1]
position = (node_lineno + example.lineno + e.lineno,
example.indent + 4 + (e.offset or 0))
self.report(messages.DoctestSyntaxError, node, position)
else:
self.offset = (node_offset[0] + node_lineno + example.lineno,
node_offset[1] + example.indent + 4)
self.handleChildren(tree)
self.offset = node_offset
if not underscore_in_builtins:
self.builtIns.remove('_')
self.popScope()
def ignore(self, node):
pass
# "stmt" type nodes
DELETE = PRINT = FOR = WHILE = IF = WITH = WITHITEM = RAISE = \
TRYFINALLY = ASSERT = EXEC = EXPR = ASSIGN = handleChildren
CONTINUE = BREAK = PASS = ignore
# "expr" type nodes
BOOLOP = BINOP = UNARYOP = IFEXP = DICT = SET = \
COMPARE = CALL = REPR = ATTRIBUTE = SUBSCRIPT = LIST = TUPLE = \
STARRED = NAMECONSTANT = handleChildren
NUM = STR = BYTES = ELLIPSIS = ignore
# "slice" type nodes
SLICE = EXTSLICE = INDEX = handleChildren
# expression contexts are node instances too, though being constants
LOAD = STORE = DEL = AUGLOAD = AUGSTORE = PARAM = ignore
# same for operators
AND = OR = ADD = SUB = MULT = DIV = MOD = POW = LSHIFT = RSHIFT = \
BITOR = BITXOR = BITAND = FLOORDIV = INVERT = NOT = UADD = USUB = \
EQ = NOTEQ = LT = LTE = GT = GTE = IS = ISNOT = IN = NOTIN = ignore
# additional node types
COMPREHENSION = KEYWORD = handleChildren
def GLOBAL(self, node):
"""
Keep track of globals declarations.
"""
# In doctests, the global scope is an anonymous function at index 1.
global_scope_index = 1 if self.withDoctest else 0
global_scope = self.scopeStack[global_scope_index]
# Ignore 'global' statement in global scope.
if self.scope is not global_scope:
# One 'global' statement can bind multiple (comma-delimited) names.
for node_name in node.names:
node_value = Assignment(node_name, node)
# Remove UndefinedName messages already reported for this name.
self.messages = [
m for m in self.messages if not
isinstance(m, messages.UndefinedName) and not
m.message_args[0] == node_name]
# Bind name to global scope if it doesn't exist already.
global_scope.setdefault(node_name, node_value)
# Bind name to non-global scopes, but as already "used".
node_value.used = (global_scope, node)
for scope in self.scopeStack[global_scope_index + 1:]:
scope[node_name] = node_value
NONLOCAL = GLOBAL
def GENERATOREXP(self, node):
self.pushScope(GeneratorScope)
self.handleChildren(node)
self.popScope()
LISTCOMP = handleChildren if PY2 else GENERATOREXP
DICTCOMP = SETCOMP = GENERATOREXP
def NAME(self, node):
"""
Handle occurrence of Name (which can be a load/store/delete access.)
"""
# Locate the name in locals / function / globals scopes.
if isinstance(node.ctx, (ast.Load, ast.AugLoad)):
self.handleNodeLoad(node)
if (node.id == 'locals' and isinstance(self.scope, FunctionScope)
and isinstance(node.parent, ast.Call)):
# we are doing locals() call in current scope
self.scope.usesLocals = True
elif isinstance(node.ctx, (ast.Store, ast.AugStore)):
self.handleNodeStore(node)
elif isinstance(node.ctx, ast.Del):
self.handleNodeDelete(node)
else:
# must be a Param context -- this only happens for names in function
# arguments, but these aren't dispatched through here
raise RuntimeError("Got impossible expression context: %r" % (node.ctx,))
def RETURN(self, node):
if isinstance(self.scope, ClassScope):
self.report(messages.ReturnOutsideFunction, node)
return
if (
node.value and
hasattr(self.scope, 'returnValue') and
not self.scope.returnValue
):
self.scope.returnValue = node.value
self.handleNode(node.value, node)
def YIELD(self, node):
self.scope.isGenerator = True
self.handleNode(node.value, node)
YIELDFROM = YIELD
def FUNCTIONDEF(self, node):
for deco in node.decorator_list:
self.handleNode(deco, node)
self.LAMBDA(node)
self.addBinding(node, FunctionDefinition(node.name, node))
if self.withDoctest:
self.deferFunction(lambda: self.handleDoctests(node))
def LAMBDA(self, node):
args = []
annotations = []
if PY2:
def addArgs(arglist):
for arg in arglist:
if isinstance(arg, ast.Tuple):
addArgs(arg.elts)
else:
args.append(arg.id)
addArgs(node.args.args)
defaults = node.args.defaults
else:
for arg in node.args.args + node.args.kwonlyargs:
args.append(arg.arg)
annotations.append(arg.annotation)
defaults = node.args.defaults + node.args.kw_defaults
# Only for Python3 FunctionDefs
is_py3_func = hasattr(node, 'returns')
for arg_name in ('vararg', 'kwarg'):
wildcard = getattr(node.args, arg_name)
if not wildcard:
continue
args.append(wildcard if PY33 else wildcard.arg)
if is_py3_func:
if PY33: # Python 2.5 to 3.3
argannotation = arg_name + 'annotation'
annotations.append(getattr(node.args, argannotation))
else: # Python >= 3.4
annotations.append(wildcard.annotation)
if is_py3_func:
annotations.append(node.returns)
if len(set(args)) < len(args):
for (idx, arg) in enumerate(args):
if arg in args[:idx]:
self.report(messages.DuplicateArgument, node, arg)
for child in annotations + defaults:
if child:
self.handleNode(child, node)
def runFunction():
self.pushScope()
for name in args:
self.addBinding(node, Argument(name, node))
if isinstance(node.body, list):
# case for FunctionDefs
for stmt in node.body:
self.handleNode(stmt, node)
else:
# case for Lambdas
self.handleNode(node.body, node)
def checkUnusedAssignments():
"""
Check to see if any assignments have not been used.
"""
for name, binding in self.scope.unusedAssignments():
self.report(messages.UnusedVariable, binding.source, name)
self.deferAssignment(checkUnusedAssignments)
if PY32:
def checkReturnWithArgumentInsideGenerator():
"""
Check to see if there is any return statement with
arguments but the function is a generator.
"""
if self.scope.isGenerator and self.scope.returnValue:
self.report(messages.ReturnWithArgsInsideGenerator,
self.scope.returnValue)
self.deferAssignment(checkReturnWithArgumentInsideGenerator)
self.popScope()
self.deferFunction(runFunction)
def CLASSDEF(self, node):
"""
Check names used in a class definition, including its decorators, base
classes, and the body of its definition. Additionally, add its name to
the current scope.
"""
for deco in node.decorator_list:
self.handleNode(deco, node)
for baseNode in node.bases:
self.handleNode(baseNode, node)
if not PY2:
for keywordNode in node.keywords:
self.handleNode(keywordNode, node)
self.pushScope(ClassScope)
if self.withDoctest:
self.deferFunction(lambda: self.handleDoctests(node))
for stmt in node.body:
self.handleNode(stmt, node)
self.popScope()
self.addBinding(node, ClassDefinition(node.name, node))
def AUGASSIGN(self, node):
self.handleNodeLoad(node.target)
self.handleNode(node.value, node)
self.handleNode(node.target, node)
def IMPORT(self, node):
for alias in node.names:
name = alias.asname or alias.name
importation = Importation(name, node)
self.addBinding(node, importation)
def IMPORTFROM(self, node):
if node.module == '__future__':
if not self.futuresAllowed:
self.report(messages.LateFutureImport,
node, [n.name for n in node.names])
else:
self.futuresAllowed = False
for alias in node.names:
if alias.name == '*':
self.scope.importStarred = True
self.report(messages.ImportStarUsed, node, node.module)
continue
name = alias.asname or alias.name
importation = Importation(name, node)
if node.module == '__future__':
importation.used = (self.scope, node)
self.addBinding(node, importation)
def TRY(self, node):
handler_names = []
# List the exception handlers
for handler in node.handlers:
if isinstance(handler.type, ast.Tuple):
for exc_type in handler.type.elts:
handler_names.append(getNodeName(exc_type))
elif handler.type:
handler_names.append(getNodeName(handler.type))
# Memorize the except handlers and process the body
self.exceptHandlers.append(handler_names)
for child in node.body:
self.handleNode(child, node)
self.exceptHandlers.pop()
# Process the other nodes: "except:", "else:", "finally:"
self.handleChildren(node, omit='body')
TRYEXCEPT = TRY
def EXCEPTHANDLER(self, node):
# 3.x: in addition to handling children, we must handle the name of
# the exception, which is not a Name node, but a simple string.
if isinstance(node.name, str):
self.handleNodeStore(node)
self.handleChildren(node)
| mpl-2.0 |
petr-muller/beakerlib | src/python/rlMemAvg.py | 1 | 1621 | #!/usr/bin/python
# Authors: Petr Muller <pmuller@redhat.com>
#
# Description: Prints memory consumption average for an executed program
#
# Copyright (c) 2008 Red Hat, Inc. All rights reserved. This copyrighted
# material is made available to anyone wishing to use, modify, copy, or
# redistribute it subject to the terms and conditions of the GNU General
# Public License v.2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys, time, re
use_sub = False
use_popen = False
try:
import subprocess
use_sub = True
except ImportError:
import popen2
use_popen = True
if len(sys.argv) < 2:
print 'syntax: rlMemAvg <command>'
sys.exit(1)
proglist = sys.argv[1:]
if use_sub:
task = subprocess.Popen(proglist)
elif use_popen:
task = popen2.Popen3(" ".join(proglist))
memsum = 0
tick = 0
fn = '/proc/%d/status' % task.pid
mre = re.compile(r'VmRSS:[ \t]+(?P<mem>\d+)')
while True:
for line in open(fn, 'r').readlines():
m = mre.search(line)
if m:
mem = int(m.group('mem'))
memsum += mem
tick += 1
break
time.sleep(0.1)
finish = task.poll()
if (use_sub and finish != None) or (use_popen and finish != -1):
break
print "%d" % (memsum/tick)
| gpl-2.0 |
awkspace/ansible | lib/ansible/modules/cloud/azure/azure_rm_availabilityset_facts.py | 8 | 4695 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Julien Stroheker <juliens@microsoft.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible metadata: module maturity and support channel.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

# User-facing module documentation, rendered by ansible-doc.
DOCUMENTATION = '''
---
module: azure_rm_availabilityset_facts
version_added: "2.4"
short_description: Get availability set facts.
description:
    - Get facts for a specific availability set or all availability sets.
options:
    name:
        description:
            - Limit results to a specific availability set
    resource_group:
        description:
            - The resource group to search for the desired availability set
    tags:
        description:
            - List of tags to be matched
extends_documentation_fragment:
    - azure
author:
    - "Julien Stroheker (@julienstroheker)"
'''

EXAMPLES = '''
    - name: Get facts for one availability set
      azure_rm_availabilityset_facts:
        name: Testing
        resource_group: myResourceGroup

    - name: Get facts for all availability sets in a specific resource group
      azure_rm_availabilityset_facts:
        resource_group: myResourceGroup
'''

RETURN = '''
azure_availabilityset:
    description: List of availability sets dicts.
    returned: always
    type: list
    example: [{
        "location": "eastus2",
        "name": "myavailabilityset",
        "properties": {
            "platformFaultDomainCount": 3,
            "platformUpdateDomainCount": 2,
            "virtualMachines": []
        },
        "sku": "Aligned",
        "type": "Microsoft.Compute/availabilitySets"
    }]
'''

from ansible.module_utils.azure_rm_common import AzureRMModuleBase

try:
    from msrestazure.azure_exceptions import CloudError
except Exception:
    # handled in azure_rm_common
    pass

# Class name passed to serialize_obj() when flattening SDK objects.
AZURE_OBJECT_CLASS = 'AvailabilitySet'
class AzureRMAvailabilitySetFacts(AzureRMModuleBase):
    """Facts module: fetch one availability set by name, or list them all."""

    def __init__(self):
        # Parameters accepted by the module.
        self.module_args = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
            tags=dict(type='list')
        )
        # Facts modules never change state, so `changed` stays False.
        self.results = dict(
            changed=False,
            ansible_facts=dict(azure_availabilitysets=[])
        )
        self.name = None
        self.resource_group = None
        self.tags = None
        super(AzureRMAvailabilitySetFacts, self).__init__(
            derived_arg_spec=self.module_args,
            supports_tags=False,
            facts_module=True
        )

    def exec_module(self, **kwargs):
        """Copy validated parameters onto self and dispatch to get or list."""
        for key in self.module_args:
            setattr(self, key, kwargs[key])
        if self.name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name.")
        facts = self.get_item() if self.name else self.list_items()
        self.results['ansible_facts']['azure_availabilitysets'] = facts
        return self.results

    def _to_facts_dict(self, item):
        """Flatten an SDK availability-set object into a facts dictionary."""
        avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
        avase['name'] = item.name
        avase['type'] = item.type
        avase['sku'] = item.sku.name
        return avase

    def get_item(self):
        """Return [facts] for the named set when it exists and matches the
        tag filter, otherwise an empty list."""
        self.log('Get properties for {0}'.format(self.name))
        try:
            item = self.compute_client.availability_sets.get(
                self.resource_group, self.name)
        except CloudError:
            item = None
        if item is None or not self.has_tags(item.tags, self.tags):
            return []
        return [self._to_facts_dict(item)]

    def list_items(self):
        """Return facts for every availability set matching the tag filter."""
        self.log('List all availability sets')
        try:
            response = self.compute_client.availability_sets.list(self.resource_group)
        except CloudError as exc:
            self.fail('Failed to list all items - {0}'.format(str(exc)))
        return [self._to_facts_dict(item)
                for item in response
                if self.has_tags(item.tags, self.tags)]
def main():
    """Main module execution code path"""
    # Instantiating the facts class parses arguments, runs exec_module()
    # and exits through AnsibleModule's exit/fail machinery.
    AzureRMAvailabilitySetFacts()


if __name__ == '__main__':
    main()
| gpl-3.0 |
sharkerz/capablanca | test/test_piece.py | 1 | 2560 | # -*- coding: utf-8 -*-
# Copyright 2015, Dario Blanco
"""
This module tests capablanca.piece
"""
from capablanca import piece
def test_king_positions():
    """A King threatens exactly the adjacent squares that are on the board."""
    king = piece.King(3, 3)
    centre_threats = {
        (0, 0), (1, 0), (2, 0), (0, 1), (2, 1), (0, 2), (1, 2), (2, 2),
    }
    assert king.get_threats((1, 1)) == centre_threats
    assert king.get_threats((0, 0)) == {(1, 0), (0, 1), (1, 1)}
    assert king.get_threats((2, 2)) == {(1, 1), (2, 1), (1, 2)}
    # On a 1x3 board both end squares threaten only the middle one.
    king = piece.King(1, 3)
    assert king.get_threats((0, 2)) == {(0, 1)}
    assert king.get_threats((0, 0)) == {(0, 1)}
    # ... and the middle square threatens both ends.
    king = piece.King(1, 3)
    assert king.get_threats((0, 1)) == {(0, 0), (0, 2)}
def test_bishop_positions():
    """A Bishop threatens every on-board square along its two diagonals."""
    bishop = piece.Bishop(3, 4)
    assert bishop.get_threats((1, 2)) == {(2, 3), (2, 1), (0, 3), (0, 1)}
    assert bishop.get_threats((0, 0)) == {(1, 1), (2, 2)}
    assert bishop.get_threats((2, 3)) == {(1, 2), (0, 1)}
def test_rook_positions():
    """A Rook threatens its whole row and column, excluding its own square."""
    rook = piece.Rook(3, 4)
    expected = {
        (1, 1): {(0, 1), (1, 0), (1, 2), (1, 3), (2, 1)},
        (0, 0): {(0, 1), (0, 2), (0, 3), (1, 0), (2, 0)},
        (2, 3): {(0, 3), (1, 3), (2, 0), (2, 1), (2, 2)},
    }
    for position, threats in expected.items():
        assert rook.get_threats(position) == threats
def test_queen_positions():
    """A Queen's threats are the union of Rook and Bishop threats."""
    queen = piece.Queen(6, 6)
    diagonals = {(4, 4), (5, 5), (4, 2), (5, 1), (2, 4), (1, 5), (2, 2),
                 (1, 1), (0, 0)}
    straights = {(3, 0), (3, 1), (3, 2), (3, 4), (3, 5), (0, 3), (1, 3),
                 (2, 3), (4, 3), (5, 3)}
    assert queen.get_threats((3, 3)) == diagonals | straights
    corner_diag = {(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)}
    corner_lines = {(0, 1), (0, 2), (0, 3), (0, 4), (0, 5),
                    (1, 0), (2, 0), (3, 0), (4, 0), (5, 0)}
    assert queen.get_threats((0, 0)) == corner_diag | corner_lines
    far_corner = {(6, 0), (6, 1), (6, 2), (6, 3), (6, 4), (6, 5),
                  (0, 6), (1, 6), (2, 6), (3, 6), (4, 6), (5, 6)}
    assert queen.get_threats((6, 6)) == far_corner
def test_knight_positions():
    """A Knight threatens the L-shaped jumps that stay on the board."""
    knight = piece.Knight(6, 6)
    assert knight.get_threats((3, 3)) == {
        (5, 4), (5, 2), (4, 5), (4, 1), (1, 2), (1, 4), (2, 5), (2, 1),
    }
    assert knight.get_threats((0, 0)) == {(2, 1), (1, 2)}
    assert knight.get_threats((6, 6)) == {(4, 5), (5, 4)}
| gpl-2.0 |
kijiproject/kiji-mapreduce | kiji-mapreduce/src/test/profiling/scripts/test_kijistats.py | 2 | 5307 | #!/usr/bin/python
# Copyright 2013 WibiData, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
test_kijistats.py is a script to unit test the kijistats.py script to ensure it is
performing the expected computations, such as aggregating based on jobname or function name.
It uses pre-existing files in kiji-mapreduce/src/test/profiling/resources/ that
contain some sample output that was collected from some profiled MapReduce jobs.
The file name of the resource files represents a MapReduce task attempt id.
The format of the resources files is as follows:
Job Name, Job ID, Task Attempt, Function Signature, Aggregate Time (nanoseconds), Number of Invocations, Time per call (nanoseconds)
From *this* directory, you can run the test script as:
./test_kijistats.py
On MacOSX, you may need to run this as:
arch -i386 /usr/bin/python test_kijistats.py
'''
import os
import sys
import unittest
# path to where the kijistats script is located
srcpath = os.path.abspath(os.getcwd() + '/../../../main/profiling/scripts')
sys.path.insert(0, srcpath)
import kijistats
import tempfile
# Unit test for the kijistats.py script
class TestKijiStats(unittest.TestCase):
def setUp(self):
# The directory in which the resource files containing sample profiling data reside.
self.input_dir = os.path.abspath(os.getcwd() + '/../resources')
def testArgsForJob(self):
# Raise error since we must specify either job id, job name, task attempt or function
# for which you wish to collect stats.
self.assertRaises(ValueError, kijistats.main, ['kijistats', '--to-file', 'xyz'])
# Aggregate profiling stats according to Job Name. E.g. You may like to see if the stats are
# consistent across all runs of a particular Gatherer, in this case, TestingGatherer.
def testByJobName(self):
expectedOutput = "public synchronized org.kiji.schema.KijiSchemaTable.SchemaEntry " + \
"org.kiji.schema.impl.HBaseSchemaTable.getSchemaEntry(org.kiji.schema.util.BytesKey)," + \
" 278514701, 875, 709618.05"
tf = tempfile.NamedTemporaryFile(delete=False)
filename = tf.name
kijistats.main(['./kijistats.py',
'--stats-dir',
self.input_dir,
'--by-jobname',
'TestingGatherer',
'--to-file',
filename])
with open(filename) as f:
for line in f:
if "getSchemaEntry" in line:
self.assertEqual(expectedOutput, line.rstrip())
return
self.fail("Expected line not found in output")
# Aggregate profiling stats across all calls to a certain function. E.g. You may want to ensure
# that avro counter encoder behaves consistently across all calls to it.
def testByFunction(self):
expectedOutput = "attempt_local1340085606_0004_m_000000_0, 157036000, 320, 490737.5"
tf = tempfile.NamedTemporaryFile(delete=False)
filename = tf.name
kijistats.main(['./kijistats',
'--stats-dir',
self.input_dir,
'--by-function',
'getSchemaEntry',
'--to-file',
filename])
with open(filename) as f:
for line in f:
if "attempt_local1340085606_0004_m_000000_0" in line:
self.assertEqual(expectedOutput, line.rstrip())
return
self.fail("Expected line not found in output")
# Aggregate profiling stats across all attempts of a particular job. Instead of per attempt, you
# may wish to see how a function performs across all attempts to get an average.
def testByJobID(self):
expectedOutput = "public synchronized org.kiji.schema.KijiSchemaTable.SchemaEntry " + \
"org.kiji.schema.impl.HBaseSchemaTable.getSchemaEntry(org.kiji.schema.util.BytesKey)," + \
" 157036000, 320, 490737.5"
tf = tempfile.NamedTemporaryFile(delete=False)
filename = tf.name
kijistats.main(['./kijistats',
'--stats-dir',
self.input_dir,
'--by-job',
'job_local1340085606_0004',
'--to-file',
filename])
with open(filename) as f:
for line in f:
if "getSchemaEntry" in line:
self.assertEqual(expectedOutput, line.rstrip())
return
self.fail("Expected line not found in output")
# Build and run the suite immediately at module level.
# NOTE(review): consider guarding with `if __name__ == '__main__':` so that
# importing this module does not execute the tests as a side effect —
# confirm no tooling relies on the current behavior before changing it.
suite = unittest.TestLoader().loadTestsFromTestCase(TestKijiStats)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 |
interactiveinstitute/watthappened | python_modules/restkit/conn.py | 2 | 3307 | # -*- coding: utf-8 -
#
# This file is part of restkit released under the MIT license.
# See the NOTICE for more information.
import logging
import random
import select
import socket
import ssl
import time
import cStringIO
from socketpool import Connector
from socketpool.util import is_connected
# Size of each read from a file object in Connection.sendfile(), in bytes.
CHUNK_SIZE = 16 * 1024
# NOTE(review): not referenced in this chunk — presumably the maximum body
# size restkit buffers in memory elsewhere; confirm before relying on it.
MAX_BODY = 1024 * 112
# NOTE(review): not referenced in this chunk — presumably a DNS cache
# lifetime in seconds used elsewhere in restkit; confirm.
DNS_TIMEOUT = 60
class Connection(Connector):
    """A pooled (optionally SSL-wrapped) client socket for restkit.

    Implements the socketpool Connector interface: a connection knows which
    host/port it targets, whether it is still alive, and how to be released
    back to (or invalidated from) its owning pool.
    """

    def __init__(self, host, port, backend_mod=None, pool=None,
            is_ssl=False, extra_headers=[], proxy_pieces=None, **ssl_args):
        # connect the socket, if we are using an SSL connection, we wrap
        # the socket.
        self._s = backend_mod.Socket(socket.AF_INET, socket.SOCK_STREAM)
        self._s.connect((host, port))
        if proxy_pieces:
            # Tunnel through an HTTP proxy: send the pre-built CONNECT
            # request, then consume the proxy's reply one byte at a time
            # until the blank line (\r\n\r\n) that ends its headers.
            self._s.sendall(proxy_pieces)
            response = cStringIO.StringIO()
            while response.getvalue()[-4:] != '\r\n\r\n':
                response.write(self._s.recv(1))
            response.close()
        if is_ssl:
            self._s = ssl.wrap_socket(self._s, **ssl_args)
        self.extra_headers = extra_headers
        self.is_ssl = is_ssl
        self.backend_mod = backend_mod
        self.host = host
        self.port = port
        self._connected = True
        # Randomize the recorded birth time slightly so pooled connections
        # do not all reach their lifetime limit at the same instant.
        self._life = time.time() - random.randint(0, 10)
        self._pool = pool
        self._released = False

    def matches(self, **match_options):
        """Return True if this connection targets the requested host/port."""
        target_host = match_options.get('host')
        target_port = match_options.get('port')
        return target_host == self.host and target_port == self.port

    def is_connected(self):
        """Liveness probe; always False once the connection is invalidated."""
        if self._connected:
            return is_connected(self._s)
        return False

    def handle_exception(self, exception):
        # Connector hook: propagate errors to the caller unchanged.
        raise

    def get_lifetime(self):
        """Timestamp the pool uses to expire old connections."""
        return self._life

    def invalidate(self):
        """Close the socket and mark the connection unusable and expired."""
        self.close()
        self._connected = False
        self._life = -1

    def release(self, should_close=False):
        """Hand the connection back to its pool, optionally closing it first."""
        if self._pool is not None:
            if self._connected:
                if should_close:
                    self.invalidate()
                self._pool.release_connection(self)
            else:
                # Dead connection: drop the pool reference instead of
                # returning an unusable socket to it.
                self._pool = None
        elif self._connected:
            # No pool to return to: just tear the socket down.
            self.invalidate()

    def close(self):
        """Best-effort socket close; never raises."""
        if not self._s or not hasattr(self._s, "close"):
            return
        try:
            self._s.close()
        except:
            pass

    def socket(self):
        """Expose the underlying socket object."""
        return self._s

    def send_chunk(self, data):
        # HTTP/1.1 chunked transfer encoding: hex length, CRLF, payload, CRLF.
        chunk = "".join(("%X\r\n" % len(data), data, "\r\n"))
        self._s.sendall(chunk)

    def send(self, data, chunked=False):
        """Send data, wrapping it as a single chunk when chunked=True."""
        if chunked:
            return self.send_chunk(data)
        return self._s.sendall(data)

    def sendlines(self, lines, chunked=False):
        """Send each item of an iterable of byte strings in order."""
        for line in list(lines):
            self.send(line, chunked=chunked)

    # TODO: add support for sendfile api
    def sendfile(self, data, chunked=False):
        """ send a data from a FileObject """
        # Rewind seekable file objects so the whole content is sent.
        if hasattr(data, 'seek'):
            data.seek(0)
        while True:
            binarydata = data.read(CHUNK_SIZE)
            if binarydata == '':
                break
            self.send(binarydata, chunked=chunked)

    def recv(self, size=1024):
        """Read up to `size` bytes from the socket."""
        return self._s.recv(size)
| mit |
yanirs/servo | tests/wpt/web-platform-tests/tools/wptserve/tests/functional/test_cookies.py | 299 | 1996 | import os
import unittest
import urllib2
import json
import wptserve
from base import TestUsingServer, doc_root
class TestResponseSetCookie(TestUsingServer):
    """Covers Response.set_cookie/unset_cookie/delete_cookie header output."""

    def test_name_value(self):
        # A plain set_cookie must emit "name=value" with the default Path=/.
        @wptserve.handlers.handler
        def handler(request, response):
            response.set_cookie("name", "value")
            return "Test"

        route = ("GET", "/test/name_value", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        self.assertEquals(resp.info()["Set-Cookie"], "name=value; Path=/")

    def test_unset(self):
        # Setting then unsetting a cookie before the response is sent must
        # leave no Set-Cookie header at all.
        @wptserve.handlers.handler
        def handler(request, response):
            response.set_cookie("name", "value")
            response.unset_cookie("name")
            return "Test"

        route = ("GET", "/test/unset", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        self.assertTrue("Set-Cookie" not in resp.info())

    def test_delete(self):
        # Deletion is expressed as an empty value with Path=/ (plus an
        # Expires attribute in the past, which is not asserted here).
        @wptserve.handlers.handler
        def handler(request, response):
            response.delete_cookie("name")
            return "Test"

        route = ("GET", "/test/delete", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        parts = dict(item.split("=") for
                     item in resp.info()["Set-Cookie"].split("; ") if item)
        self.assertEquals(parts["name"], "")
        self.assertEquals(parts["Path"], "/")
        #Should also check that expires is in the past
class TestRequestCookies(TestUsingServer):
    """Covers parsing of the client-sent Cookie header into request.cookies."""

    def test_set_cookie(self):
        # The handler echoes back the value parsed from the Cookie header.
        @wptserve.handlers.handler
        def handler(request, response):
            return request.cookies["name"].value

        route = ("GET", "/test/set_cookie", handler)
        self.server.router.register(*route)
        resp = self.request(route[1], headers={"Cookie": "name=value"})
        self.assertEquals(resp.read(), "value")


if __name__ == '__main__':
    unittest.main()
| mpl-2.0 |
rscnt/django-cms | cms/tests/test_site.py | 46 | 5902 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import copy
from cms.utils.urlutils import admin_reverse
from django.contrib.sites.models import Site
from cms.api import create_page
from cms.models import Page, Placeholder
from cms.utils import get_cms_setting
from cms.test_utils.testcases import CMSTestCase
class SiteTestCase(CMSTestCase):
    """Site framework specific test cases.

    All stuff which is changing settings.SITE_ID for tests should come here.
    """
    def setUp(self):
        # The test DB starts with exactly one Site (the default example.com).
        self.assertEqual(Site.objects.all().count(), 1)
        with self.settings(SITE_ID=1):
            u = self._create_user("test", True, True)
            # setup sites
            self.site2 = Site.objects.create(domain="sample2.com", name="sample2.com", pk=2)
            self.site3 = Site.objects.create(domain="sample3.com", name="sample3.com", pk=3)
            # Log in once for all tests; the context manager is entered
            # explicitly here and exited in tearDown.
            self._login_context = self.login_user_context(u)
            self._login_context.__enter__()

    def tearDown(self):
        self._login_context.__exit__(None, None, None)

    def test_site_framework(self):
        #Test the site framework, and test if it's possible to disable it
        with self.settings(SITE_ID=self.site2.pk):
            create_page("page_2a", "nav_playground.html", "de", site=self.site2)
            # The admin changelist can still filter by another site's pages.
            response = self.client.get("/en/admin/cms/page/?site__exact=%s" % self.site3.pk)
            self.assertEqual(response.status_code, 200)
            create_page("page_3b", "nav_playground.html", "de", site=self.site3)
        with self.settings(SITE_ID=self.site3.pk):
            create_page("page_3a", "nav_playground.html", "nl", site=self.site3)
            # with param
            self.assertEqual(Page.objects.on_site(self.site2.pk).count(), 1)
            self.assertEqual(Page.objects.on_site(self.site3.pk).count(), 2)
            self.assertEqual(Page.objects.drafts().on_site().count(), 2)
        with self.settings(SITE_ID=self.site2.pk):
            # without param
            self.assertEqual(Page.objects.drafts().on_site().count(), 1)

    def test_site_preview(self):
        # Previewing a page of another site must redirect to that site's
        # domain with edit mode switched on in the query string.
        page = create_page("page", "nav_playground.html", "de", site=self.site2, published=True)
        with self.login_user_context(self.get_superuser()):
            response = self.client.get(admin_reverse('cms_page_preview_page', args=[page.pk, 'de']))
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response._headers['location'][1], 'http://sample2.com/de/?%s&language=de' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))

    def test_site_publish(self):
        # This test drives anonymous requests, so drop the login from setUp.
        self._login_context.__exit__(None, None, None)
        # One small page tree per site; index 0 is the root page.
        pages = {"2": list(range(0, 5)), "3": list(range(0, 5))}
        lang_settings = copy.deepcopy(get_cms_setting('LANGUAGES'))
        lang_settings[3][1]['public'] = True
        with self.settings(CMS_LANGUAGES=lang_settings, LANGUAGE_CODE="de"):
            with self.settings(SITE_ID=self.site2.pk):
                pages["2"][0] = create_page("page_2", "nav_playground.html", "de",
                                            site=self.site2)
                pages["2"][0].publish('de')
                pages["2"][1] = create_page("page_2_1", "nav_playground.html", "de",
                                            parent=pages["2"][0], site=self.site2)
                pages["2"][2] = create_page("page_2_2", "nav_playground.html", "de",
                                            parent=pages["2"][0], site=self.site2)
                pages["2"][3] = create_page("page_2_1_1", "nav_playground.html", "de",
                                            parent=pages["2"][1], site=self.site2)
                pages["2"][4] = create_page("page_2_1_2", "nav_playground.html", "de",
                                            parent=pages["2"][1], site=self.site2)
                for page in pages["2"]:
                    page.publish('de')
                # Every published page must be reachable on its own site.
                for page in pages["2"]:
                    if page.is_home:
                        page_url = "/de/"
                    else:
                        page_url = page.get_absolute_url(language='de')
                    response = self.client.get(page_url)
                    self.assertEqual(response.status_code, 200)
            with self.settings(SITE_ID=self.site3.pk):
                pages["3"][0] = create_page("page_3", "nav_playground.html", "de",
                                            site=self.site3)
                pages["3"][0].publish('de')
                pages["3"][1] = create_page("page_3_1", "nav_playground.html", "de",
                                            parent=pages["3"][0], site=self.site3)
                pages["3"][2] = create_page("page_3_2", "nav_playground.html", "de",
                                            parent=pages["3"][0], site=self.site3)
                pages["3"][3] = create_page("page_3_1_1", "nav_playground.html", "de",
                                            parent=pages["3"][1], site=self.site3)
                pages["3"][4] = create_page("page_3_1_2", "nav_playground.html", "de",
                                            parent=pages["3"][1], site=self.site3)
                for page in pages["3"]:
                    page.publish('de')
                for page in pages["3"]:
                    if page.is_home:
                        page_url = "/de/"
                    else:
                        page_url = page.get_absolute_url(language='de')
                    response = self.client.get(page_url)
                    self.assertEqual(response.status_code, 200)

    def test_site_delete(self):
        # Deleting a site must cascade to its pages' placeholders.
        with self.settings(SITE_ID=self.site2.pk):
            create_page("page_2a", "nav_playground.html", "de", site=self.site2)
            self.assertEqual(Placeholder.objects.count(), 2)
            self.site2.delete()
            self.assertEqual(Placeholder.objects.count(), 0)
| bsd-3-clause |
israeltobias/DownMedia | youtube-dl/youtube_dl/extractor/miomio.py | 15 | 4681 | # coding: utf-8
from __future__ import unicode_literals
import random
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
xpath_text,
int_or_none,
ExtractorError,
sanitized_Request,
)
class MioMioIE(InfoExtractor):
    """Extractor for miomio.tv watch pages.

    Handles both the legacy Flash "mioplayer" (segment list fetched from an
    XML/sina config endpoint) and the newer HTML5 player page.
    """
    IE_NAME = 'miomio.tv'
    _VALID_URL = r'https?://(?:www\.)?miomio\.tv/watch/cc(?P<id>[0-9]+)'
    _TESTS = [{
        # "type=video" in flashvars
        'url': 'http://www.miomio.tv/watch/cc88912/',
        'info_dict': {
            'id': '88912',
            'ext': 'flv',
            'title': '【SKY】字幕 铠武昭和VS平成 假面骑士大战FEAT战队 魔星字幕组 字幕',
            'duration': 5923,
        },
        'skip': 'Unable to load videos',
    }, {
        'url': 'http://www.miomio.tv/watch/cc184024/',
        'info_dict': {
            'id': '43729',
            'title': '《动漫同人插画绘制》',
        },
        'playlist_mincount': 86,
        'skip': 'Unable to load videos',
    }, {
        'url': 'http://www.miomio.tv/watch/cc173113/',
        'info_dict': {
            'id': '173113',
            'title': 'The New Macbook 2015 上手试玩与简评'
        },
        'playlist_mincount': 2,
        'skip': 'Unable to load videos',
    }, {
        # new 'h5' player
        'url': 'http://www.miomio.tv/watch/cc273997/',
        'md5': '0b27a4b4495055d826813f8c3a6b2070',
        'info_dict': {
            'id': '273997',
            'ext': 'mp4',
            'title': 'マツコの知らない世界【劇的進化SP!ビニール傘&冷凍食品2016】 1_2 - 16 05 31',
        },
    }]

    def _extract_mioplayer(self, webpage, video_id, title, http_headers):
        """Return a list of info dicts, one per <durl> segment in the legacy
        Flash player's XML configuration."""
        xml_config = self._search_regex(
            r'flashvars="type=(?:sina|video)&(.+?)&',
            webpage, 'xml config')
        # skipping the following page causes lags and eventually connection drop-outs
        # Bug fix: the original interpolated the *builtin* function `id`
        # here (rendering "<built-in function id>") instead of the video id.
        self._request_webpage(
            'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/xml.php?id=%s&r=%s' % (video_id, random.randint(100, 999)),
            video_id)
        vid_config_request = sanitized_Request(
            'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/sina.php?{0}'.format(xml_config),
            headers=http_headers)
        # the following xml contains the actual configuration information on the video file(s)
        vid_config = self._download_xml(vid_config_request, video_id)
        if not int_or_none(xpath_text(vid_config, 'timelength')):
            raise ExtractorError('Unable to load videos!', expected=True)
        entries = []
        for f in vid_config.findall('./durl'):
            segment_url = xpath_text(f, 'url', 'video url')
            if not segment_url:
                continue
            # Multi-part videos carry an <order> element; suffix both the
            # id and the title so the parts stay distinguishable.
            order = xpath_text(f, 'order', 'order')
            segment_id = video_id
            segment_title = title
            if order:
                segment_id += '-%s' % order
                segment_title += ' part %s' % order
            entries.append({
                'id': segment_id,
                'url': segment_url,
                'title': segment_title,
                # <length> is expressed in milliseconds.
                'duration': int_or_none(xpath_text(f, 'length', 'duration'), 1000),
                'http_headers': http_headers,
            })
        return entries

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        title = self._html_search_meta(
            'description', webpage, 'title', fatal=True)
        mioplayer_path = self._search_regex(
            r'src="(/mioplayer(?:_h5)?/[^"]+)"', webpage, 'ref_path')
        if '_h5' in mioplayer_path:
            # HTML5 player: the <video> tags live in a separate player page.
            player_url = compat_urlparse.urljoin(url, mioplayer_path)
            player_webpage = self._download_webpage(
                player_url, video_id,
                note='Downloading player webpage', headers={'Referer': url})
            entries = self._parse_html5_media_entries(player_url, player_webpage, video_id)
            http_headers = {'Referer': player_url}
        else:
            http_headers = {'Referer': 'http://www.miomio.tv%s' % mioplayer_path}
            entries = self._extract_mioplayer(webpage, video_id, title, http_headers)
        if len(entries) == 1:
            # Single segment: return it directly rather than as a playlist.
            segment = entries[0]
            segment['id'] = video_id
            segment['title'] = title
            segment['http_headers'] = http_headers
            return segment
        return {
            '_type': 'multi_video',
            'id': video_id,
            'entries': entries,
            'title': title,
            'http_headers': http_headers,
        }
| gpl-3.0 |
adamrj/Debate-Sentiment-Analysis | project/sentiment/views.py | 1 | 3556 | from django.shortcuts import render, redirect
from django.views.generic import View
from sentiment.models import Tweet, Word
from sentiment.bayes import *
from django.db.models import Avg
from django.http import JsonResponse
class IndexView(View):
    """Renders the sentiment dashboard landing page (static template)."""
    template = 'sentiment/index.html'

    def get(self, request):
        return render(request, self.template)
class BarChartView(View):
    """JSON endpoint: average sentiment score per candidate."""

    candidate_list = ["Trump", "Bush", "Walker", "Huckabee", "Carson", "Cruz", "Rubio", "Paul", "Christie", "Kasich"]

    def get(self, request):
        # Each value is the dict produced by aggregate(), e.g.
        # {'score__avg': 0.12} — the frontend unpacks that key.
        averages = {
            name: Tweet.objects.filter(candidate=name).aggregate(Avg('score'))
            for name in self.candidate_list
        }
        return JsonResponse({"candidates": averages})
class SecondView(View):
    """Renders the line-chart page with the candidate list as context.

    NOTE(review): candidate_list is duplicated across several views in this
    module — consider extracting a shared module-level constant.
    """
    template = 'sentiment/second.html'
    candidate_list = ["Trump", "Bush", "Walker", "Huckabee", "Carson", "Cruz", "Rubio", "Paul", "Christie", "Kasich"]

    def get(self, request):
        return render(request, self.template, {'candidates': self.candidate_list})
class LineChartView(View):
    """JSON endpoint: 6-minute rolling average of sentiment per candidate.

    Tweets are bucketed by minute-of-debate (hour*60 + minute, hours 0-3);
    for each minute x in [6, 240) the mean score over the trailing window
    [x-5, x] is reported. Minutes whose window holds no tweets are omitted,
    matching the original behavior.
    """

    candidate_list = ["Trump", "Bush", "Walker", "Huckabee", "Carson", "Cruz", "Rubio", "Paul", "Christie", "Kasich"]

    # The debate spans 4 hours => 240 one-minute buckets.
    _NUM_MINUTES = 4 * 60
    # Rolling window width in minutes (the current minute plus 5 before it).
    _WINDOW = 6

    def get(self, request):
        candidate_dict = {}
        for candidate in self.candidate_list:
            candidate_dict[candidate] = self._rolling_averages(
                Tweet.objects.filter(candidate=candidate))
        return JsonResponse({"candidates": candidate_dict})

    def _rolling_averages(self, tweets):
        """Map minute index -> mean score of tweets in the trailing window."""
        # Bucket scores directly by minute in O(len(tweets)); the previous
        # implementation scanned all 240 buckets once per tweet.
        buckets = {minute: [] for minute in range(self._NUM_MINUTES)}
        for tweet in tweets:
            minute = tweet.date.hour * 60 + tweet.date.minute
            if minute in buckets:
                buckets[minute].append(tweet.score)
        averages = {}
        for x in range(self._WINDOW, self._NUM_MINUTES):
            window = [score
                      for y in range(x - self._WINDOW + 1, x + 1)
                      for score in buckets[y]]
            if window:
                averages[x] = sum(window) / len(window)
        return averages
class TweetView(View):
    """JSON endpoint: find one tweet near a given minute and rating.

    Query parameters:
        time:      minute of the debate (may arrive as a float string)
        candidate: candidate last name, e.g. "Trump"
        rating:    target sentiment score

    Returns the first stored tweet for the candidate whose minute index is
    within the half-open window [time-5, time+5) and whose score is within
    0.05 of `rating` (exclusive); otherwise a "No Tweet found" payload.
    """

    def get(self, request):
        time = int(float(request.GET.get('time')))
        candidate = request.GET.get('candidate')
        rating = float(request.GET.get('rating'))
        # Cleanup: the original looped with per-tweet debug print() calls
        # and commented-out scaffolding; both removed. Filtering stays in
        # Python because the minute index derives from the datetime field.
        for tweet in Tweet.objects.filter(candidate=candidate):
            minute = tweet.date.hour * 60 + tweet.date.minute
            if (time - 5 <= minute < time + 5
                    and rating - 0.05 < tweet.score < rating + 0.05):
                return JsonResponse({"tweet": tweet.text,
                                     "score": tweet.score,
                                     "date": tweet.date})
        return JsonResponse({"tweet": "No Tweet found"})
class ThirdView(View):
    """Renders the dot-chart page with the candidate list as context.

    NOTE(review): structurally identical to SecondView apart from the
    template — consider a shared base view or a single parameterized view.
    """
    template = 'sentiment/third.html'
    candidate_list = ["Trump", "Bush", "Walker", "Huckabee", "Carson", "Cruz", "Rubio", "Paul", "Christie", "Kasich"]

    def get(self, request):
        return render(request, self.template, {'candidates': self.candidate_list})
class DotChartView(View):
    """JSON endpoint: every stored tweet as a (text, time, score, candidate)
    data point for the scatter chart."""

    def get(self, request):
        points = [
            {
                "text": tweet.text,
                "time": tweet.date.hour * 60 + tweet.date.minute,
                "score": tweet.score,
                "candidate": tweet.candidate,
            }
            for tweet in Tweet.objects.all()
        ]
        return JsonResponse({"all_tweets": points})
| mit |
vipul-sharma20/oh-mainline | vendor/packages/python-openid/openid/test/test_ax.py | 77 | 21540 | """Tests for the attribute exchange extension module
"""
import unittest
from openid.extensions import ax
from openid.message import NamespaceMap, Message, OPENID2_NS
from openid.consumer.consumer import SuccessResponse
class BogusAXMessage(ax.AXMessage):
mode = 'bogus'
getExtensionArgs = ax.AXMessage._newArgs
class DummyRequest(object):
def __init__(self, message):
self.message = message
class AXMessageTest(unittest.TestCase):
def setUp(self):
self.bax = BogusAXMessage()
def test_checkMode(self):
check = self.bax._checkMode
self.failUnlessRaises(ax.NotAXMessage, check, {})
self.failUnlessRaises(ax.AXError, check, {'mode':'fetch_request'})
# does not raise an exception when the mode is right
check({'mode':self.bax.mode})
def test_checkMode_newArgs(self):
"""_newArgs generates something that has the correct mode"""
# This would raise AXError if it didn't like the mode newArgs made.
self.bax._checkMode(self.bax._newArgs())
class AttrInfoTest(unittest.TestCase):
def test_construct(self):
self.failUnlessRaises(TypeError, ax.AttrInfo)
type_uri = 'a uri'
ainfo = ax.AttrInfo(type_uri)
self.failUnlessEqual(type_uri, ainfo.type_uri)
self.failUnlessEqual(1, ainfo.count)
self.failIf(ainfo.required)
self.failUnless(ainfo.alias is None)
class ToTypeURIsTest(unittest.TestCase):
def setUp(self):
self.aliases = NamespaceMap()
def test_empty(self):
for empty in [None, '']:
uris = ax.toTypeURIs(self.aliases, empty)
self.failUnlessEqual([], uris)
def test_undefined(self):
self.failUnlessRaises(
KeyError,
ax.toTypeURIs, self.aliases, 'http://janrain.com/')
def test_one(self):
uri = 'http://janrain.com/'
alias = 'openid_hackers'
self.aliases.addAlias(uri, alias)
uris = ax.toTypeURIs(self.aliases, alias)
self.failUnlessEqual([uri], uris)
def test_two(self):
uri1 = 'http://janrain.com/'
alias1 = 'openid_hackers'
self.aliases.addAlias(uri1, alias1)
uri2 = 'http://jyte.com/'
alias2 = 'openid_hack'
self.aliases.addAlias(uri2, alias2)
uris = ax.toTypeURIs(self.aliases, ','.join([alias1, alias2]))
self.failUnlessEqual([uri1, uri2], uris)
class ParseAXValuesTest(unittest.TestCase):
"""Testing AXKeyValueMessage.parseExtensionArgs."""
def failUnlessAXKeyError(self, ax_args):
msg = ax.AXKeyValueMessage()
self.failUnlessRaises(KeyError, msg.parseExtensionArgs, ax_args)
def failUnlessAXValues(self, ax_args, expected_args):
"""Fail unless parseExtensionArgs(ax_args) == expected_args."""
msg = ax.AXKeyValueMessage()
msg.parseExtensionArgs(ax_args)
self.failUnlessEqual(expected_args, msg.data)
def test_emptyIsValid(self):
self.failUnlessAXValues({}, {})
def test_missingValueForAliasExplodes(self):
self.failUnlessAXKeyError({'type.foo':'urn:foo'})
def test_countPresentButNotValue(self):
self.failUnlessAXKeyError({'type.foo':'urn:foo',
'count.foo':'1'})
def test_invalidCountValue(self):
msg = ax.FetchRequest()
self.failUnlessRaises(ax.AXError,
msg.parseExtensionArgs,
{'type.foo':'urn:foo',
'count.foo':'bogus'})
def test_requestUnlimitedValues(self):
msg = ax.FetchRequest()
msg.parseExtensionArgs(
{'mode':'fetch_request',
'required':'foo',
'type.foo':'urn:foo',
'count.foo':ax.UNLIMITED_VALUES})
attrs = list(msg.iterAttrs())
foo = attrs[0]
self.failUnless(foo.count == ax.UNLIMITED_VALUES)
self.failUnless(foo.wantsUnlimitedValues())
def test_longAlias(self):
# Spec minimum length is 32 characters. This is a silly test
# for this library, but it's here for completeness.
alias = 'x' * ax.MINIMUM_SUPPORTED_ALIAS_LENGTH
msg = ax.AXKeyValueMessage()
msg.parseExtensionArgs(
{'type.%s' % (alias,): 'urn:foo',
'count.%s' % (alias,): '1',
'value.%s.1' % (alias,): 'first'}
)
def test_invalidAlias(self):
types = [
ax.AXKeyValueMessage,
ax.FetchRequest
]
inputs = [
{'type.a.b':'urn:foo',
'count.a.b':'1'},
{'type.a,b':'urn:foo',
'count.a,b':'1'},
]
for typ in types:
for input in inputs:
msg = typ()
self.failUnlessRaises(ax.AXError, msg.parseExtensionArgs,
input)
def test_countPresentAndIsZero(self):
self.failUnlessAXValues(
{'type.foo':'urn:foo',
'count.foo':'0',
}, {'urn:foo':[]})
def test_singletonEmpty(self):
self.failUnlessAXValues(
{'type.foo':'urn:foo',
'value.foo':'',
}, {'urn:foo':[]})
def test_doubleAlias(self):
self.failUnlessAXKeyError(
{'type.foo':'urn:foo',
'value.foo':'',
'type.bar':'urn:foo',
'value.bar':'',
})
def test_doubleSingleton(self):
self.failUnlessAXValues(
{'type.foo':'urn:foo',
'value.foo':'',
'type.bar':'urn:bar',
'value.bar':'',
}, {'urn:foo':[], 'urn:bar':[]})
def test_singletonValue(self):
self.failUnlessAXValues(
{'type.foo':'urn:foo',
'value.foo':'Westfall',
}, {'urn:foo':['Westfall']})
class FetchRequestTest(unittest.TestCase):
def setUp(self):
self.msg = ax.FetchRequest()
self.type_a = 'http://janrain.example.com/a'
self.alias_a = 'a'
def test_mode(self):
self.failUnlessEqual(self.msg.mode, 'fetch_request')
def test_construct(self):
self.failUnlessEqual({}, self.msg.requested_attributes)
self.failUnlessEqual(None, self.msg.update_url)
msg = ax.FetchRequest('hailstorm')
self.failUnlessEqual({}, msg.requested_attributes)
self.failUnlessEqual('hailstorm', msg.update_url)
def test_add(self):
uri = 'mud://puddle'
# Not yet added:
self.failIf(uri in self.msg)
attr = ax.AttrInfo(uri)
self.msg.add(attr)
# Present after adding
self.failUnless(uri in self.msg)
def test_addTwice(self):
uri = 'lightning://storm'
attr = ax.AttrInfo(uri)
self.msg.add(attr)
self.failUnlessRaises(KeyError, self.msg.add, attr)
def test_getExtensionArgs_empty(self):
expected_args = {
'mode':'fetch_request',
}
self.failUnlessEqual(expected_args, self.msg.getExtensionArgs())
def test_getExtensionArgs_noAlias(self):
attr = ax.AttrInfo(
type_uri = 'type://of.transportation',
)
self.msg.add(attr)
ax_args = self.msg.getExtensionArgs()
for k, v in ax_args.iteritems():
if v == attr.type_uri and k.startswith('type.'):
alias = k[5:]
break
else:
self.fail("Didn't find the type definition")
self.failUnlessExtensionArgs({
'type.' + alias:attr.type_uri,
'if_available':alias,
})
def test_getExtensionArgs_alias_if_available(self):
attr = ax.AttrInfo(
type_uri = 'type://of.transportation',
alias = 'transport',
)
self.msg.add(attr)
self.failUnlessExtensionArgs({
'type.' + attr.alias:attr.type_uri,
'if_available':attr.alias,
})
def test_getExtensionArgs_alias_req(self):
attr = ax.AttrInfo(
type_uri = 'type://of.transportation',
alias = 'transport',
required = True,
)
self.msg.add(attr)
self.failUnlessExtensionArgs({
'type.' + attr.alias:attr.type_uri,
'required':attr.alias,
})
def failUnlessExtensionArgs(self, expected_args):
"""Make sure that getExtensionArgs has the expected result
This method will fill in the mode.
"""
expected_args = dict(expected_args)
expected_args['mode'] = self.msg.mode
self.failUnlessEqual(expected_args, self.msg.getExtensionArgs())
def test_isIterable(self):
self.failUnlessEqual([], list(self.msg))
self.failUnlessEqual([], list(self.msg.iterAttrs()))
def test_getRequiredAttrs_empty(self):
self.failUnlessEqual([], self.msg.getRequiredAttrs())
def test_parseExtensionArgs_extraType(self):
extension_args = {
'mode':'fetch_request',
'type.' + self.alias_a:self.type_a,
}
self.failUnlessRaises(ValueError,
self.msg.parseExtensionArgs, extension_args)
def test_parseExtensionArgs(self):
extension_args = {
'mode':'fetch_request',
'type.' + self.alias_a:self.type_a,
'if_available':self.alias_a
}
self.msg.parseExtensionArgs(extension_args)
self.failUnless(self.type_a in self.msg)
self.failUnlessEqual([self.type_a], list(self.msg))
attr_info = self.msg.requested_attributes.get(self.type_a)
self.failUnless(attr_info)
self.failIf(attr_info.required)
self.failUnlessEqual(self.type_a, attr_info.type_uri)
self.failUnlessEqual(self.alias_a, attr_info.alias)
self.failUnlessEqual([attr_info], list(self.msg.iterAttrs()))
def test_extensionArgs_idempotent(self):
extension_args = {
'mode':'fetch_request',
'type.' + self.alias_a:self.type_a,
'if_available':self.alias_a
}
self.msg.parseExtensionArgs(extension_args)
self.failUnlessEqual(extension_args, self.msg.getExtensionArgs())
self.failIf(self.msg.requested_attributes[self.type_a].required)
def test_extensionArgs_idempotent_count_required(self):
extension_args = {
'mode':'fetch_request',
'type.' + self.alias_a:self.type_a,
'count.' + self.alias_a:'2',
'required':self.alias_a
}
self.msg.parseExtensionArgs(extension_args)
self.failUnlessEqual(extension_args, self.msg.getExtensionArgs())
self.failUnless(self.msg.requested_attributes[self.type_a].required)
def test_extensionArgs_count1(self):
extension_args = {
'mode':'fetch_request',
'type.' + self.alias_a:self.type_a,
'count.' + self.alias_a:'1',
'if_available':self.alias_a,
}
extension_args_norm = {
'mode':'fetch_request',
'type.' + self.alias_a:self.type_a,
'if_available':self.alias_a,
}
self.msg.parseExtensionArgs(extension_args)
self.failUnlessEqual(extension_args_norm, self.msg.getExtensionArgs())
def test_openidNoRealm(self):
openid_req_msg = Message.fromOpenIDArgs({
'mode': 'checkid_setup',
'ns': OPENID2_NS,
'ns.ax': ax.AXMessage.ns_uri,
'ax.update_url': 'http://different.site/path',
'ax.mode': 'fetch_request',
})
self.failUnlessRaises(ax.AXError,
ax.FetchRequest.fromOpenIDRequest,
DummyRequest(openid_req_msg))
def test_openidUpdateURLVerificationError(self):
openid_req_msg = Message.fromOpenIDArgs({
'mode': 'checkid_setup',
'ns': OPENID2_NS,
'realm': 'http://example.com/realm',
'ns.ax': ax.AXMessage.ns_uri,
'ax.update_url': 'http://different.site/path',
'ax.mode': 'fetch_request',
})
self.failUnlessRaises(ax.AXError,
ax.FetchRequest.fromOpenIDRequest,
DummyRequest(openid_req_msg))
def test_openidUpdateURLVerificationSuccess(self):
openid_req_msg = Message.fromOpenIDArgs({
'mode': 'checkid_setup',
'ns': OPENID2_NS,
'realm': 'http://example.com/realm',
'ns.ax': ax.AXMessage.ns_uri,
'ax.update_url': 'http://example.com/realm/update_path',
'ax.mode': 'fetch_request',
})
fr = ax.FetchRequest.fromOpenIDRequest(DummyRequest(openid_req_msg))
def test_openidUpdateURLVerificationSuccessReturnTo(self):
openid_req_msg = Message.fromOpenIDArgs({
'mode': 'checkid_setup',
'ns': OPENID2_NS,
'return_to': 'http://example.com/realm',
'ns.ax': ax.AXMessage.ns_uri,
'ax.update_url': 'http://example.com/realm/update_path',
'ax.mode': 'fetch_request',
})
fr = ax.FetchRequest.fromOpenIDRequest(DummyRequest(openid_req_msg))
def test_fromOpenIDRequestWithoutExtension(self):
"""return None for an OpenIDRequest without AX paramaters."""
openid_req_msg = Message.fromOpenIDArgs({
'mode': 'checkid_setup',
'ns': OPENID2_NS,
})
oreq = DummyRequest(openid_req_msg)
r = ax.FetchRequest.fromOpenIDRequest(oreq)
self.failUnless(r is None, "%s is not None" % (r,))
def test_fromOpenIDRequestWithoutData(self):
"""return something for SuccessResponse with AX paramaters,
even if it is the empty set."""
openid_req_msg = Message.fromOpenIDArgs({
'mode': 'checkid_setup',
'realm': 'http://example.com/realm',
'ns': OPENID2_NS,
'ns.ax': ax.AXMessage.ns_uri,
'ax.mode': 'fetch_request',
})
oreq = DummyRequest(openid_req_msg)
r = ax.FetchRequest.fromOpenIDRequest(oreq)
self.failUnless(r is not None)
class FetchResponseTest(unittest.TestCase):
def setUp(self):
self.msg = ax.FetchResponse()
self.value_a = 'monkeys'
self.type_a = 'http://phone.home/'
self.alias_a = 'robocop'
self.request_update_url = 'http://update.bogus/'
def test_construct(self):
self.failUnless(self.msg.update_url is None)
self.failUnlessEqual({}, self.msg.data)
def test_getExtensionArgs_empty(self):
expected_args = {
'mode':'fetch_response',
}
self.failUnlessEqual(expected_args, self.msg.getExtensionArgs())
def test_getExtensionArgs_empty_request(self):
expected_args = {
'mode':'fetch_response',
}
req = ax.FetchRequest()
msg = ax.FetchResponse(request=req)
self.failUnlessEqual(expected_args, msg.getExtensionArgs())
def test_getExtensionArgs_empty_request_some(self):
uri = 'http://not.found/'
alias = 'ext0'
expected_args = {
'mode':'fetch_response',
'type.%s' % (alias,): uri,
'count.%s' % (alias,): '0'
}
req = ax.FetchRequest()
req.add(ax.AttrInfo(uri))
msg = ax.FetchResponse(request=req)
self.failUnlessEqual(expected_args, msg.getExtensionArgs())
def test_updateUrlInResponse(self):
uri = 'http://not.found/'
alias = 'ext0'
expected_args = {
'mode':'fetch_response',
'update_url': self.request_update_url,
'type.%s' % (alias,): uri,
'count.%s' % (alias,): '0'
}
req = ax.FetchRequest(update_url=self.request_update_url)
req.add(ax.AttrInfo(uri))
msg = ax.FetchResponse(request=req)
self.failUnlessEqual(expected_args, msg.getExtensionArgs())
def test_getExtensionArgs_some_request(self):
expected_args = {
'mode':'fetch_response',
'type.' + self.alias_a:self.type_a,
'value.' + self.alias_a + '.1':self.value_a,
'count.' + self.alias_a: '1'
}
req = ax.FetchRequest()
req.add(ax.AttrInfo(self.type_a, alias=self.alias_a))
msg = ax.FetchResponse(request=req)
msg.addValue(self.type_a, self.value_a)
self.failUnlessEqual(expected_args, msg.getExtensionArgs())
def test_getExtensionArgs_some_not_request(self):
req = ax.FetchRequest()
msg = ax.FetchResponse(request=req)
msg.addValue(self.type_a, self.value_a)
self.failUnlessRaises(KeyError, msg.getExtensionArgs)
def test_getSingle_success(self):
req = ax.FetchRequest()
self.msg.addValue(self.type_a, self.value_a)
self.failUnlessEqual(self.value_a, self.msg.getSingle(self.type_a))
def test_getSingle_none(self):
self.failUnlessEqual(None, self.msg.getSingle(self.type_a))
def test_getSingle_extra(self):
self.msg.setValues(self.type_a, ['x', 'y'])
self.failUnlessRaises(ax.AXError, self.msg.getSingle, self.type_a)
def test_get(self):
self.failUnlessRaises(KeyError, self.msg.get, self.type_a)
def test_fromSuccessResponseWithoutExtension(self):
"""return None for SuccessResponse with no AX paramaters."""
args = {
'mode': 'id_res',
'ns': OPENID2_NS,
}
sf = ['openid.' + i for i in args.keys()]
msg = Message.fromOpenIDArgs(args)
class Endpoint:
claimed_id = 'http://invalid.'
oreq = SuccessResponse(Endpoint(), msg, signed_fields=sf)
r = ax.FetchResponse.fromSuccessResponse(oreq)
self.failUnless(r is None, "%s is not None" % (r,))
def test_fromSuccessResponseWithoutData(self):
"""return something for SuccessResponse with AX paramaters,
even if it is the empty set."""
args = {
'mode': 'id_res',
'ns': OPENID2_NS,
'ns.ax': ax.AXMessage.ns_uri,
'ax.mode': 'fetch_response',
}
sf = ['openid.' + i for i in args.keys()]
msg = Message.fromOpenIDArgs(args)
class Endpoint:
claimed_id = 'http://invalid.'
oreq = SuccessResponse(Endpoint(), msg, signed_fields=sf)
r = ax.FetchResponse.fromSuccessResponse(oreq)
self.failUnless(r is not None)
def test_fromSuccessResponseWithData(self):
name = 'ext0'
value = 'snozzberry'
uri = "http://willy.wonka.name/"
args = {
'mode': 'id_res',
'ns': OPENID2_NS,
'ns.ax': ax.AXMessage.ns_uri,
'ax.update_url': 'http://example.com/realm/update_path',
'ax.mode': 'fetch_response',
'ax.type.'+name: uri,
'ax.count.'+name: '1',
'ax.value.%s.1'%name: value,
}
sf = ['openid.' + i for i in args.keys()]
msg = Message.fromOpenIDArgs(args)
class Endpoint:
claimed_id = 'http://invalid.'
resp = SuccessResponse(Endpoint(), msg, signed_fields=sf)
ax_resp = ax.FetchResponse.fromSuccessResponse(resp)
values = ax_resp.get(uri)
self.failUnlessEqual([value], values)
class StoreRequestTest(unittest.TestCase):
    """Tests for the AX store_request message type."""

    def setUp(self):
        self.msg = ax.StoreRequest()
        self.type_a = 'http://three.count/'
        self.alias_a = 'juggling'

    def test_construct(self):
        """A freshly constructed StoreRequest carries no data."""
        self.failUnlessEqual({}, self.msg.data)

    def test_getExtensionArgs_empty(self):
        """An empty request serializes to just the mode argument."""
        args = self.msg.getExtensionArgs()
        expected_args = {
            'mode':'store_request',
            }
        self.failUnlessEqual(expected_args, args)

    def test_getExtensionArgs_nonempty(self):
        """Stored values serialize with type.<alias>, count.<alias> and
        1-based value.<alias>.<n> keys."""
        aliases = NamespaceMap()
        aliases.addAlias(self.type_a, self.alias_a)
        msg = ax.StoreRequest(aliases=aliases)
        msg.setValues(self.type_a, ['foo', 'bar'])
        args = msg.getExtensionArgs()
        expected_args = {
            'mode':'store_request',
            'type.' + self.alias_a: self.type_a,
            'count.' + self.alias_a: '2',
            'value.%s.1' % (self.alias_a,):'foo',
            'value.%s.2' % (self.alias_a,):'bar',
            }
        self.failUnlessEqual(expected_args, args)
class StoreResponseTest(unittest.TestCase):
    """Tests for the AX store_response message type."""

    def test_success(self):
        """A default StoreResponse reports success and carries no error."""
        msg = ax.StoreResponse()
        self.failUnless(msg.succeeded())
        self.failIf(msg.error_message)
        self.failUnlessEqual({'mode':'store_response_success'},
                             msg.getExtensionArgs())

    def test_fail_nomsg(self):
        """A failure without a reason serializes to only the failure mode."""
        msg = ax.StoreResponse(False)
        self.failIf(msg.succeeded())
        self.failIf(msg.error_message)
        self.failUnlessEqual({'mode':'store_response_failure'},
                             msg.getExtensionArgs())

    def test_fail_msg(self):
        """A failure reason is carried in the 'error' extension argument."""
        reason = 'no reason, really'
        msg = ax.StoreResponse(False, reason)
        self.failIf(msg.succeeded())
        self.failUnlessEqual(reason, msg.error_message)
        self.failUnlessEqual({'mode':'store_response_failure',
                              'error':reason}, msg.getExtensionArgs())
| agpl-3.0 |
marqueedev/django | tests/template_tests/filter_tests/test_striptags.py | 324 | 1338 | from django.template.defaultfilters import striptags
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class StriptagsTests(SimpleTestCase):
    """Template-level tests for the ``striptags`` filter."""

    @setup({'striptags01': '{{ a|striptags }} {{ b|striptags }}'})
    def test_striptags01(self):
        """Tags are stripped for both unsafe and safe input with
        autoescaping on."""
        output = self.engine.render_to_string(
            'striptags01',
            {
                'a': '<a>x</a> <p><b>y</b></p>',
                'b': mark_safe('<a>x</a> <p><b>y</b></p>'),
            },
        )
        self.assertEqual(output, 'x y x y')

    @setup({'striptags02': '{% autoescape off %}{{ a|striptags }} {{ b|striptags }}{% endautoescape %}'})
    def test_striptags02(self):
        """Stripping behaves identically with autoescaping disabled."""
        output = self.engine.render_to_string(
            'striptags02',
            {
                'a': '<a>x</a> <p><b>y</b></p>',
                'b': mark_safe('<a>x</a> <p><b>y</b></p>'),
            },
        )
        self.assertEqual(output, 'x y x y')
class FunctionTests(SimpleTestCase):
    """Direct tests for the striptags filter function."""

    def test_strip(self):
        """All markup is removed while text content is preserved."""
        html = 'some <b>html</b> with <script>alert("You smell")</script> disallowed <img /> tags'
        expected = 'some html with alert("You smell") disallowed tags'
        self.assertEqual(striptags(html), expected)

    def test_non_string_input(self):
        """Non-string input is coerced to text before stripping."""
        self.assertEqual(striptags(123), '123')
| bsd-3-clause |
LokiNetworks/empower-runtime | empower/lvnf_ems/lvnf_set.py | 2 | 4546 | #!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""LVNF EMS SET command."""
from uuid import UUID
from empower.core.lvnf import LVNF
from empower.core.module import Module
from empower.lvnf_ems import PT_LVNF_SET_REQUEST
from empower.lvnf_ems import PT_LVNF_SET_RESPONSE
from empower.lvnfp.lvnfpserver import ModuleLVNFPWorker
from empower.main import RUNTIME
class LVNFSet(Module):
    """LVNF Set object.

    Module that writes a value to one handler of a Light Virtual Network
    Function (LVNF) by sending an LVNF_SET_REQUEST to the CPP hosting it.
    """

    # NOTE(review): "lvnf_get" looks like a copy/paste from the companion
    # GET module; "lvnf_set" would be expected here.  The module-level
    # setattr() binds this module onto LVNF under this name, so changing
    # it renames the public hook -- confirm before fixing.
    MODULE_NAME = "lvnf_get"
    # Parameters that must be supplied when the module is created.
    REQUIRED = ['module_type', 'worker', 'tenant_id', 'lvnf', 'handler',
                'value']

    def __init__(self):
        super().__init__()

        # parameters
        self.__lvnf = None      # UUID of the target LVNF
        self.__handler = None   # name of the handler to write
        self.value = None       # value to be written to the handler

        # data structures (filled in by handle_response)
        self.samples = None
        self.retcode = None

    def __eq__(self, other):
        # Equal when base Module fields match and both target the same
        # LVNF/handler with the same value.
        return super().__eq__(other) and \
            self.lvnf == other.lvnf and \
            self.handler == other.handler and \
            self.value == other.value

    @property
    def handler(self):
        """Return the handler name."""
        return self.__handler

    @handler.setter
    def handler(self, handler):
        """Set the handler name.

        Raises KeyError if the handler is not exported by the image of
        the LVNF this module targets (lvnf must be set first).
        """
        tenant = RUNTIME.tenants[self.tenant_id]
        lvnf = tenant.lvnfs[self.lvnf]

        if handler not in lvnf.image.handlers:
            raise KeyError("Handler %s not found" % handler)

        self.__handler = handler

    @property
    def lvnf(self):
        # UUID of the LVNF this module operates on.
        return self.__lvnf

    @lvnf.setter
    def lvnf(self, value):
        # Accept anything whose string form is a valid UUID.
        self.__lvnf = UUID(str(value))

    def to_dict(self):
        """Return a JSON-serializable representation of this object."""
        out = super().to_dict()

        out['lvnf'] = self.lvnf
        out['handler'] = self.handler
        out['samples'] = self.samples
        out['retcode'] = self.retcode

        return out

    def run_once(self):
        """Send out handler requests.

        Silently returns when the tenant is unknown or the CPP hosting
        the LVNF has no connection; logs an error when the LVNF itself
        cannot be found.
        """
        if self.tenant_id not in RUNTIME.tenants:
            return

        tenant = RUNTIME.tenants[self.tenant_id]

        if self.lvnf not in tenant.lvnfs:
            self.log.error("LVNF %s not found.", self.lvnf)
            return

        lvnf = tenant.lvnfs[self.lvnf]

        if not lvnf.cpp.connection:
            return

        handler_req = {'module_id': self.module_id,
                       'lvnf_id': self.lvnf,
                       'tenant_id': self.tenant_id,
                       'handler': lvnf.image.handlers[self.handler],
                       'value': self.value}

        lvnf.cpp.connection.send_message(PT_LVNF_SET_REQUEST, handler_req)

    def handle_response(self, response):
        """Handle an incoming LVNF_SET_RESPONSE message.

        Args:
            response, a LVNF_SET_RESPONSE message

        Returns:
            None
        """
        tenant_id = UUID(response['tenant_id'])
        lvnf_id = UUID(response['lvnf_id'])

        tenant = RUNTIME.tenants[tenant_id]

        if lvnf_id not in tenant.lvnfs:
            return

        # update this object; any retcode other than 200 is treated as an
        # error and the callback is not invoked
        if response['retcode'] != 200:
            error = "%s (%s)" % (response['retcode'], response['samples'])
            self.log.error("Error accessing %s: %s", self.handler, error)
            return

        self.retcode = response['retcode']
        self.samples = response['samples']

        # call callback
        self.handle_callback(self)
class LVNFSetWorker(ModuleLVNFPWorker):
    """ LVNF Set worker.

    Thin worker subclass; launch() binds it to LVNFSet modules and
    PT_LVNF_SET_RESPONSE messages.
    """
    pass
def lvnf_set(**kwargs):
    """Create a new module.

    Delegates to the LVNFSetWorker component registered in RUNTIME under
    this module's import path.
    """
    return RUNTIME.components[LVNFSetWorker.__module__].add_module(**kwargs)
def bound_lvnf_set(self, **kwargs):
    """Create a new module (app version).

    Fills tenant_id and lvnf from the bound LVNF instance; every=-1
    means the module runs once instead of periodically.
    """
    kwargs['tenant_id'] = self.tenant.tenant_id
    kwargs['lvnf'] = self.lvnf
    kwargs['every'] = -1
    return lvnf_set(**kwargs)

# Expose the bound helper as a method on LVNF, named after MODULE_NAME
# (currently "lvnf_get"; see NOTE(review) if that looks unexpected).
setattr(LVNF, LVNFSet.MODULE_NAME, bound_lvnf_set)
def launch():
    """ Initialize the module.

    Returns the worker that dispatches PT_LVNF_SET_RESPONSE messages to
    LVNFSet modules.
    """
    return LVNFSetWorker(LVNFSet, PT_LVNF_SET_RESPONSE)
| apache-2.0 |
poppogbr/genropy | resources/common/print_utils.py | 1 | 8971 | #!/usr/bin/env python
# encoding: utf-8
"""
print_utils.py
Created by Saverio Porcari on 2009-06-29.
Copyright (c) 2009 __MyCompanyName__. All rights reserved.
"""
from gnr.web.gnrbaseclasses import BaseComponent
class PrintUtils(BaseComponent):
py_requires = 'batch_runner:BatchRunner'
def serverPrint(self, pane, name, table=None, table_resource=None,
selectionName='=list.selectionName',
selectionFilterCb=None,
recordId=None,
datapath=None, parameters_cb=None,
resultpath='.print_result',
thermoParams=None, docName=None, rebuild=True,
gridId='maingrid', batch_class=None,
commitAfterPrint=None,
selectedRowidx=None, pdfOnly=False,
_onResult='', data_method=None,
waitingDlg=None, **kwargs):
table = table or self.maintable
if not batch_class:
if recordId:
batch_class = 'PrintRecord'
if recordId == '*':
recordId = None
else:
batch_class = 'PrintSelection'
if not recordId and not selectedRowidx and not selectionFilterCb:
selectedRowidx = "==genro.wdgById('%s').getSelectedRowidx();" % gridId
datapath = datapath or 'serverprint.%s' % name
dlgPars = {}
for k, v in kwargs.items():
if k.startswith('dlg_'):
dlgPars[k[4:]] = v
kwargs.pop(k)
self.printOptDialog(pane, name, datapath, dlgPars=dlgPars, parameters_cb=parameters_cb, pdfOnly=pdfOnly)
pane.dataController("FIRE .run = 'print';", _if='cachedPrinterParams.getItem("printer_name")',
cachedPrinterParams="=_clientCtx.printerSetup.%s" % name,
_fired='^.print', _else='genro.dlg.alert(msg,title)',
msg='!!No printer selected', title='!!Warning', datapath=datapath)
batchPars = dict(datapath=datapath,
table=table, batch_class=batch_class,
table_resource=table_resource,
rebuild=rebuild, recordId=recordId,
resultpath=resultpath, thermoParams=thermoParams,
selectionName=selectionName,
selectionFilterCb=selectionFilterCb,
commitAfterPrint=commitAfterPrint,
docName=docName, selectedRowidx=selectedRowidx,
runKwargs='=.parameters.data',
data_method=data_method, waitingDlg=waitingDlg,
**kwargs)
self.buildBatchRunner(pane, _onResult='if($1){genro.download($1)};%s' % _onResult,
pdfParams='=.pdf', fired='^.dlpdf', thermoId='%s_thermo_pdf' % name, **batchPars)
if not pdfOnly:
self.buildBatchRunner(pane, printParams='=_clientCtx.printerSetup.%s' % name,
_onResult=_onResult, fired='^.run', thermoId='%s_thermo_print' % name, **batchPars)
def printOptDialog(self, pane, name, datapath=None, dlgPars=None, parameters_cb=None, pdfOnly=False):
title = dlgPars.get('title', "!!Print options")
height = dlgPars.get('height', "200px")
width = dlgPars.get('width', "450px")
dialog = pane.dialog(title="!!Print options", nodeId=name, datapath=datapath)
bc = dialog.borderContainer(height=height, width=width, _class='pbl_roundedGroup')
bc.dataController('genro.wdgById("%s").hide();' % name, _fired="^.close")
bc.dataController("""var currPrinterOpt = GET _clientCtx.printerSetup.%s;
SET .printer.params = currPrinterOpt? currPrinterOpt.deepCopy():new gnr.GnrBag();
genro.wdgById("%s").show();""" % (name, name), _fired='^.open')
bottom = bc.contentPane(region='bottom', _class='dialog_bottom')
bottom.button('!!Close', baseClass='bottom_btn', float='left', margin='1px', fire='.close')
bottom.button('!!Pdf', baseClass='bottom_btn', float='right', margin='1px',
action='FIRE .close; FIRE .dlpdf;')
if not pdfOnly:
bottom.button('!!Print', baseClass='bottom_btn', float='right', margin='1px',
action=""" FIRE .close;
var currPrinterOpt = GET .printer.params;
SET _clientCtx.printerSetup.%s = currPrinterOpt.deepCopy();
FIRE .print;
""" % name)
tc_opt = bc.tabContainer(region='center', margin='5px')
if parameters_cb:
parameters_cb(tc_opt, datapath='.parameters')
if not pdfOnly:
self._utl_print_opt(tc_opt)
self._utl_pdf_opt(tc_opt)
def _utl_print_opt(self, tc):
pane = tc.contentPane(title='Printer', datapath='.printer')
pane.data('.serverPrinters', self.rpc_getPrinters())
pane.dataRpc('.printer_attributes', 'getPrinterAttributes',
printer_name='^.params.printer_name', _if='printer_name!="PDF"')
pane.data('.selected_printer', '-')
fb = pane.formbuilder(cols=3, margin='5px')
ddb = fb.dropDownButton(label='!!Select printer', lbl='!!Printer')
ddb.menu(storepath='.serverPrinters', action="""SET .selected_printer = $1.fullpath;
SET .params.printer_name=$1.name;""")
fb.div(value='^.params.printer_name', font_size='.9em', font_style='italic', width='15em', colspan=2)
fb.filteringselect(value='^.params.printer_option.paper', lbl='!!Paper:', width='20em',
colspan=3, storepath='.printer_attributes.paper_supported')
fb.filteringselect(value='^.params.printer_option.tray', lbl='!!Tray:', width='20em', colspan=3,
storepath='.printer_attributes.tray_supported')
    def rpc_getPrinters(self):
        """Return the printers known to the 'print' service.

        Implicitly returns None when no print service is configured.
        """
        print_handler = self.getService('print')
        if print_handler:
            return print_handler.getPrinters()
    def rpc_getPrinterAttributes(self, printer_name):
        """Return the attributes of the given printer.

        Empty names and the pseudo-printer 'PDF' are skipped, yielding
        an implicit None.
        """
        if printer_name and printer_name != 'PDF':
            attributes = self.getService('print').getPrinterAttributes(printer_name)
            return attributes
    def _utl_pdf_opt(self, tc):
        # Build the 'Pdf' tab: options used when exporting to PDF.
        pane = tc.contentPane(title='!!Pdf', datapath='.pdf')
        fb = pane.formbuilder(cols=1)
        # default: do not zip the produced output
        fb.dataFormula('.zipped', 'false', _onStart=True)
        fb.checkbox(value='^.zipped', label='!!Zip folder')
        fb.checkbox(value='^.#parent.forked', label='!!Forked process')
####################DEPRECATED STUFF###################
def printDialog(self, data, title=None, _fired=None):
page = self.pageSource()
dlgId = data.replace('.', '_')
dlg = page.dialog(nodeId=dlgId, title=title, datapath=data)
dlg.data('.printers', self.rpc_getPrinters())
dlg.data('.selected_printer', '-')
page.dataController("genro.wdgById('%s').show()" % dlgId, _fired=_fired)
tc = dlg.tabContainer(datapath='.params',
height='300px',
width='420px', nodeId='printDlgStack',
selectedPage='^%s.selPage' % data)
generalPage = tc.borderContainer(title='!!Print options')
pdfPage = tc.borderContainer(pageName='pdf')
pdfPage.button('!!General', float='right', action='SET .selPage="general";')
bottomBar = generalPage.contentPane(region='bottom', height='25px', datapath=data)
centerForm = generalPage.contentPane(region='center')
cfb = centerForm.formbuilder(cols=1)
cfb.textbox(value='^.selected_printer',
lbl='Printer:', readOnly=True).menu(storepath='%s.printers' % data,
modifiers='*',
action='SET .selected_printer = $1.fullpath; SET .printer_name=$1.name;')
cfb.filteringselect(value='^.printer_options.paper', lbl='!!Paper:',
storepath='.printer_attributes.paper_supported')
cfb.filteringselect(value='^.printer_options.tray', lbl='!!Tray:',
storepath='.printer_attributes.tray_supported')
cfb.dataRpc('.printer_attributes', 'getPrinterAttributes', printer_name='^.printer_name', _if='printer_name')
bottomBar.button('!!PDF', float='right', action='SET .selPage="pdf";')
bottomBar.button('!!Print', float='right', action="FIRE .run; genro.wdgById('%s').hide();" % dlgId)
bottomBar.button('!!Cancel', float='right', action="genro.wdgById('%s').hide()" % dlgId)
| lgpl-2.1 |
Signbank/BSL-signbank | signbank/dictionary/urls.py | 1 | 2739 | from django.conf.urls import *
from django.contrib.auth.decorators import login_required, permission_required
from signbank.dictionary.models import *
from signbank.dictionary.forms import *
from signbank.dictionary.views import feature_search
from signbank.dictionary.adminviews import GlossListView, GlossDetailView
urlpatterns = patterns('',
# index page is just the search page
url(r'^$', 'signbank.dictionary.views.search'),
# we use the same view for a definition and for the feedback form on that
# definition, the first component of the path is word or feedback in each case
url(r'^words/(?P<keyword>.+)-(?P<n>\d+).html$',
'signbank.dictionary.views.word', name='word_view'),
url(r'^tag/(?P<tag>[^/]*)/?$', 'signbank.dictionary.tagviews.taglist'),
# and and alternate view for direct display of a gloss
url(r'gloss/(?P<idgloss>.+).html$', 'signbank.dictionary.views.gloss', name='public_gloss'),
# Regional views for words and glosses
url(r'^regional/(?P<keyword>.+)-(?P<n>\d+).html$',
'signbank.dictionary.views.regional', name='regional_view'),
url(r'^search/$', 'signbank.dictionary.views.search', name="search"),
url(r'^featuresearch/$', 'signbank.dictionary.views.feature_search', name='feature_search'),
url(r'^update/gloss/(?P<glossid>\d+)$', 'signbank.dictionary.update.update_gloss', name='update_gloss'),
url(r'^update/tag/(?P<glossid>\d+)$', 'signbank.dictionary.update.add_tag', name='add_tag'),
url(r'^update/definition/(?P<glossid>\d+)$', 'signbank.dictionary.update.add_definition', name='add_definition'),
url(r'^update/relation/$', 'signbank.dictionary.update.add_relation', name='add_relation'),
url(r'^update/region/(?P<glossid>\d+)$', 'signbank.dictionary.update.add_region', name='add_region'),
url(r'^update/gloss/', 'signbank.dictionary.update.add_gloss', name='add_gloss'),
url(r'^update_ecv/', GlossListView.as_view(only_export_ecv=True)),
url(r'^ajax/keyword/(?P<prefix>.*)$', 'signbank.dictionary.views.keyword_value_list'),
url(r'^ajax/tags/$', 'signbank.dictionary.tagviews.taglist_json'),
url(r'^ajax/gloss/(?P<prefix>.*)$', 'signbank.dictionary.adminviews.gloss_ajax_complete', name='gloss_complete'),
url(r'^missingvideo.html$', 'signbank.dictionary.views.missing_video_view'),
url(r'package/$', 'signbank.dictionary.views.package'),
url(r'info/$', 'signbank.dictionary.views.info'),
# Admin views
url(r'^list/$', permission_required('dictionary.search_gloss')(GlossListView.as_view()), name='admin_gloss_list'),
url(r'^gloss/(?P<pk>\d+)', permission_required('dictionary.search_gloss')(GlossDetailView.as_view()), name='admin_gloss_view'),
)
| bsd-3-clause |
theakholic/ThinkStats2 | code/chap12soln.py | 68 | 4459 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import thinkplot
import thinkstats2
import regression
import timeseries
def RunQuadraticModel(daily):
    """Runs a quadratic model of prices versus years.

    Note: adds a 'years2' (years squared) column to `daily` as a side
    effect.

    daily: DataFrame of daily prices

    returns: model, results
    """
    daily['years2'] = daily.years**2
    model = smf.ols('ppg ~ years + years2', data=daily)
    results = model.fit()
    return model, results
def PlotQuadraticModel(daily, name):
    """Fits the quadratic model and saves three figures: fitted values,
    residual percentiles, and predictions.

    daily: DataFrame of daily prices
    name: string label for the scatter plots
    """
    model, results = RunQuadraticModel(daily)
    regression.SummarizeResults(results)
    timeseries.PlotFittedValues(model, results, label=name)
    thinkplot.Save(root='timeseries11',
                   title='fitted values',
                   xlabel='years',
                   xlim=[-0.1, 3.8],
                   ylabel='price per gram ($)')

    timeseries.PlotResidualPercentiles(model, results)
    thinkplot.Save(root='timeseries12',
                   title='residuals',
                   xlabel='years',
                   ylabel='price per gram ($)')

    years = np.linspace(0, 5, 101)
    thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
    timeseries.PlotPredictions(daily, years, func=RunQuadraticModel)
    thinkplot.Save(root='timeseries13',
                   title='predictions',
                   xlabel='years',
                   xlim=[years[0]-0.1, years[-1]+0.1],
                   ylabel='price per gram ($)')
def PlotEwmaPredictions(daily, name):
    """Plots EWMA-based predictions extending one year past the data.

    NOTE(review): pandas.ewma was deprecated and later removed; this code
    targets an old pandas API (modern form: Series.ewm(span=...).mean()).

    daily: DataFrame of daily prices
    name: string label for the scatter plot
    """

    # use EWMA to estimate slopes
    filled = timeseries.FillMissing(daily)
    filled['slope'] = pandas.ewma(filled.ppg.diff(), span=180)
    filled[-1:]  # NOTE(review): expression has no effect -- notebook leftover?

    # extract the last inter and slope
    start = filled.index[-1]
    inter = filled.ewma[-1]
    slope = filled.slope[-1]

    # reindex the DataFrame, adding a year to the end
    dates = pandas.date_range(filled.index.min(),
                              filled.index.max() + np.timedelta64(365, 'D'))
    predicted = filled.reindex(dates)

    # generate predicted values and add them to the end
    predicted['date'] = predicted.index
    one_day = np.timedelta64(1, 'D')
    predicted['days'] = (predicted.date - start) / one_day
    predict = inter + slope * predicted.days
    predicted.ewma.fillna(predict, inplace=True)

    # plot the actual values and predictions
    thinkplot.Scatter(daily.ppg, alpha=0.1, label=name)
    thinkplot.Plot(predicted.ewma)
    thinkplot.Save()
class SerialCorrelationTest(thinkstats2.HypothesisTest):
    """Tests serial correlations by permutation."""

    def TestStatistic(self, data):
        """Computes the test statistic.

        data: tuple of (series, lag)

        returns: absolute serial correlation at the given lag
        """
        series, lag = data
        test_stat = abs(thinkstats2.SerialCorr(series, lag))
        return test_stat

    def RunModel(self):
        """Run the model of the null hypothesis.

        Shuffling the series destroys any serial structure while keeping
        the same values.

        returns: simulated data
        """
        series, lag = self.data
        permutation = series.reindex(np.random.permutation(series.index))
        return permutation, lag
def TestSerialCorr(daily):
    """Tests serial correlations in daily prices and their residuals.

    Prints the observed lag-1 serial correlation and its permutation
    p-value for the raw prices and for the residuals of the linear and
    quadratic models.

    daily: DataFrame of daily prices
    """
    # test the correlation between consecutive prices
    series = daily.ppg
    test = SerialCorrelationTest((series, 1))
    pvalue = test.PValue()
    print(test.actual, pvalue)

    # test for serial correlation in residuals of the linear model
    _, results = timeseries.RunLinearModel(daily)
    series = results.resid
    test = SerialCorrelationTest((series, 1))
    pvalue = test.PValue()
    print(test.actual, pvalue)

    # test for serial correlation in residuals of the quadratic model
    _, results = RunQuadraticModel(daily)
    series = results.resid
    test = SerialCorrelationTest((series, 1))
    pvalue = test.PValue()
    print(test.actual, pvalue)
def main(name):
    """Entry point: runs the quadratic-model analyses on the 'high'
    quality series.

    NOTE(review): the `name` parameter is immediately overwritten with
    'high' below, so the command-line argument is effectively ignored.
    """
    transactions = timeseries.ReadData()
    dailies = timeseries.GroupByQualityAndDay(transactions)

    name = 'high'
    daily = dailies[name]

    PlotQuadraticModel(daily, name)
    TestSerialCorr(daily)
    PlotEwmaPredictions(daily, name)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
huaweiswitch/neutron | neutron/openstack/common/fixture/lockutils.py | 34 | 1890 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from neutron.openstack.common import lockutils
class LockFixture(fixtures.Fixture):
    """External locking fixture.

    This fixture is basically an alternative to the synchronized decorator with
    the external flag so that tearDowns and addCleanups will be included in
    the lock context for locking between tests. The fixture is recommended to
    be the first line in a test method, like so::

        def test_method(self):
            self.useFixture(LockFixture)
                ...

    or the first line in setUp if all the test methods in the class are
    required to be serialized. Something like::

        class TestCase(testtools.testcase):
            def setUp(self):
                self.useFixture(LockFixture)
                super(TestCase, self).setUp()
                    ...

    This is because addCleanups are put on a LIFO queue that gets run after the
    test method exits. (either by completing or raising an exception)
    """
    def __init__(self, name, lock_file_prefix=None):
        # Create (but do not yet acquire) an external inter-process lock.
        self.mgr = lockutils.lock(name, lock_file_prefix, True)

    def setUp(self):
        super(LockFixture, self).setUp()
        # Register the release first so it runs after all other cleanups
        # (cleanups run LIFO), then acquire the lock.
        self.addCleanup(self.mgr.__exit__, None, None, None)
        self.mgr.__enter__()
| apache-2.0 |
martinbuc/missionplanner | Lib/site-packages/numpy/distutils/misc_util.py | 53 | 84156 | import os
import re
import sys
import imp
import copy
import glob
import atexit
import tempfile
import subprocess
import shutil
from distutils.errors import DistutilsError
try:
set
except NameError:
from sets import Set as set
from numpy.distutils.compat import get_exception
# Explicit public API for `from numpy.distutils.misc_util import *`.
__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
           'dict_append', 'appendpath', 'generate_config_py',
           'get_cmd', 'allpath', 'get_mathlibs',
           'terminal_has_colors', 'red_text', 'green_text', 'yellow_text',
           'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings',
           'has_f_sources', 'has_cxx_sources', 'filter_sources',
           'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',
           'get_script_files', 'get_lib_source_files', 'get_data_files',
           'dot_join', 'get_frame', 'minrelpath', 'njoin',
           'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
           'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info']
class InstallableLib(object):
    # Derive from object (new-style class) for consistency with the other
    # classes in this module (SconsInfo, Configuration); required for
    # uniform behavior of type()/descriptors on Python 2.
    """
    Container to hold information on an installable library.

    Parameters
    ----------
    name : str
        Name of the installed library.
    build_info : dict
        Dictionary holding build information.
    target_dir : str
        Absolute path specifying where to install the library.

    See Also
    --------
    Configuration.add_installed_library

    Notes
    -----
    The three parameters are stored as attributes with the same names.
    """
    def __init__(self, name, build_info, target_dir):
        self.name = name
        self.build_info = build_info
        self.target_dir = target_dir
def quote_args(args):
    """Return a copy of *args* with whitespace-containing, unquoted items
    wrapped in double quotes.

    distutils' _nt_quote_args is deliberately not used here: it does not
    check whether an item is already quoted.
    """
    quoted = list(args)
    for idx, item in enumerate(quoted):
        if ' ' in item and item[0] not in '"\'':
            quoted[idx] = '"%s"' % (item)
    return quoted
def allpath(name):
    "Convert a /-separated pathname to one using the OS's path separator."
    return os.path.join(*name.split('/'))
def rel_path(path, parent_path):
    """Return *path* relative to *parent_path*.

    Both arguments are resolved to absolute paths first; if *path* is not
    below *parent_path* it is returned unchanged.
    """
    parent = os.path.abspath(parent_path)
    apath = os.path.abspath(path)
    if len(apath) < len(parent):
        return path
    if apath == parent:
        return ''
    if parent == apath[:len(parent)]:
        # the character right after the common prefix must be a separator
        assert apath[len(parent)] in [os.sep], repr((path, apath[len(parent)]))
        return apath[len(parent)+1:]
    return path
def get_path_from_frame(frame, parent_path=None):
    """Return path of the module given a frame object from the call stack.

    Returned path is relative to parent_path when given,
    otherwise it is absolute path.
    """
    # First, try to find if the file name is in the frame.
    try:
        caller_file = eval('__file__', frame.f_globals, frame.f_locals)
        d = os.path.dirname(os.path.abspath(caller_file))
    except NameError:
        # __file__ is not defined, so let's try __name__. We try this second
        # because setuptools spoofs __name__ to be '__main__' even though
        # sys.modules['__main__'] might be something else, like easy_install(1).
        caller_name = eval('__name__', frame.f_globals, frame.f_locals)
        __import__(caller_name)
        mod = sys.modules[caller_name]
        if hasattr(mod, '__file__'):
            d = os.path.dirname(os.path.abspath(mod.__file__))
        else:
            # we're probably running setup.py as execfile("setup.py")
            # (likely we're building an egg)
            d = os.path.abspath('.')
            # hmm, should we use sys.argv[0] like in __builtin__ case?
    if parent_path is not None:
        d = rel_path(d, parent_path)
    return d or '.'
def njoin(*path):
    """Join two or more pathname components +
    - convert a /-separated pathname to one using the OS's path separator.
    - resolve `..` and `.` from path.

    Accepts either n string arguments, njoin('a','b'), a sequence of names,
    njoin(['a','b']), or any mixture of the two.
    """
    flattened = []
    for part in path:
        if is_sequence(part):
            # e.g. njoin(['a', 'b'], 'c') -- recurse into the sequence
            flattened.append(njoin(*part))
        else:
            assert is_string(part)
            flattened.append(part)
    joined = os.path.join(*flattened) if flattened else ''
    if os.path.sep != '/':
        joined = joined.replace('/', os.path.sep)
    return minrelpath(joined)
def get_mathlibs(path=None):
"""Return the MATHLIB line from numpyconfig.h
"""
if path is not None:
config_file = os.path.join(path,'_numpyconfig.h')
else:
# Look for the file in each of the numpy include directories.
dirs = get_numpy_include_dirs()
for path in dirs:
fn = os.path.join(path,'_numpyconfig.h')
if os.path.exists(fn):
config_file = fn
break
else:
raise DistutilsError('_numpyconfig.h not found in numpy include '
'dirs %r' % (dirs,))
fid = open(config_file)
mathlibs = []
s = '#define MATHLIB'
for line in fid.readlines():
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
fid.close()
return mathlibs
def minrelpath(path):
    """Resolve `..` and '.' from path.
    """
    if not is_string(path):
        return path
    if '.' not in path:
        return path
    l = path.split(os.sep)
    # Drop '.' components; searching from index 1 keeps a leading '.'.
    while l:
        try:
            i = l.index('.', 1)
        except ValueError:
            break
        del l[i]
    j = 1
    # Collapse 'name/..' pairs; consecutive '..' cannot be collapsed, so
    # `j` skips past them.
    while l:
        try:
            i = l.index('..', j)
        except ValueError:
            break
        if l[i-1] == '..':
            j += 1
        else:
            # delete the '..' first, then the component preceding it
            del l[i], l[i-1]
            j = 1
    if not l:
        return ''
    return os.sep.join(l)
def _fix_paths(paths, local_path, include_non_existing):
    """Expand glob patterns in *paths*, prefer matches relative to
    *local_path*, recurse into nested sequences, and normalize every
    result with minrelpath()."""
    assert is_sequence(paths), repr(type(paths))
    new_paths = []
    assert not is_string(paths), repr(paths)
    for n in paths:
        if is_string(n):
            if '*' in n or '?' in n:
                p = glob.glob(n)
                p2 = glob.glob(njoin(local_path, n))
                # matches under local_path win over cwd-relative matches
                if p2:
                    new_paths.extend(p2)
                elif p:
                    new_paths.extend(p)
                else:
                    if include_non_existing:
                        new_paths.append(n)
                    # diagnostic is printed whether or not the unresolved
                    # pattern is kept
                    print('could not resolve pattern in %r: %r' \
                          % (local_path, n))
            else:
                n2 = njoin(local_path, n)
                if os.path.exists(n2):
                    new_paths.append(n2)
                else:
                    if os.path.exists(n):
                        new_paths.append(n)
                    elif include_non_existing:
                        new_paths.append(n)
                    if not os.path.exists(n):
                        print('non-existing path in %r: %r' \
                              % (local_path, n))
        elif is_sequence(n):
            new_paths.extend(_fix_paths(n, local_path, include_non_existing))
        else:
            # non-string, non-sequence entries pass through untouched
            new_paths.append(n)
    return [minrelpath(p) for p in new_paths]
def gpaths(paths, local_path='', include_non_existing=True):
    """Apply glob to paths and prepend local_path if needed.

    A bare string is treated as a one-element sequence.
    """
    seq = (paths,) if is_string(paths) else paths
    return _fix_paths(seq, local_path, include_non_existing)
# Lazily-created scratch directory shared by make_temp_file(); removed at
# interpreter exit via atexit.
_temporary_directory = None
def clean_up_temporary_directory():
    """Remove the module's scratch directory (atexit handler)."""
    from numpy.distutils import log
    global _temporary_directory
    if not _temporary_directory:
        return
    log.debug('removing %s', _temporary_directory)
    try:
        shutil.rmtree(_temporary_directory)
    except OSError:
        # best-effort cleanup at interpreter shutdown
        pass
    _temporary_directory = None
def make_temp_file(suffix='', prefix='', text=True):
    """Create a temporary file inside a module-owned directory that is
    deleted at interpreter exit.

    Returns (file object opened for writing, filename).
    """
    global _temporary_directory
    if not _temporary_directory:
        # first use: create the shared directory and arrange its removal
        _temporary_directory = tempfile.mkdtemp()
        atexit.register(clean_up_temporary_directory)
    fid, name = tempfile.mkstemp(suffix=suffix,
                                 prefix=prefix,
                                 dir=_temporary_directory,
                                 text=text)
    fo = os.fdopen(fid, 'w')
    return fo, name
# Hooks for colored terminal output.
# See also http://www.livinglogic.de/Python/ansistyle
def terminal_has_colors():
    """Return 1 when stdout is a tty whose terminfo entry advertises
    colour support, else 0."""
    if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
        # Avoid importing curses that causes illegal operation
        # with a message:
        #  PYTHON2 caused an invalid page fault in
        #  module CYGNURSES7.DLL as 015f:18bbfc28
        # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
        #          ssh to Win32 machine from debian
        #          curses.version is 2.2
        #          CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
        return 0
    if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
        try:
            import curses
            curses.setupterm()
            # Accept either the legacy setf/setb capability pair, the ANSI
            # setaf/setab pair, or scp.
            if (curses.tigetnum("colors") >= 0
                and curses.tigetnum("pairs") >= 0
                and ((curses.tigetstr("setf") is not None
                      and curses.tigetstr("setb") is not None)
                     or (curses.tigetstr("setaf") is not None
                         and curses.tigetstr("setab") is not None)
                     or curses.tigetstr("scp") is not None)):
                return 1
        except Exception:
            pass
    return 0
if terminal_has_colors():
    # ANSI SGR colour indices; fg code = 30 + index, bg code = 40 + index.
    _colour_codes = dict(black=0, red=1, green=2, yellow=3,
                         blue=4, magenta=5, cyan=6, white=7, default=9)
    def colour_text(s, fg=None, bg=None, bold=False):
        """Return *s* wrapped in ANSI SGR escape sequences selecting the
        requested foreground/background colours and boldness."""
        seq = []
        if bold:
            seq.append('1')
        if fg:
            fgcode = 30 + _colour_codes.get(fg.lower(), 0)
            seq.append(str(fgcode))
        if bg:
            # BUG FIX: the background code must be derived from `bg`;
            # the original looked up `fg.lower()` here, so the requested
            # background colour was silently ignored.
            bgcode = 40 + _colour_codes.get(bg.lower(), 7)
            seq.append(str(bgcode))
        if seq:
            return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
        else:
            return s
else:
    def colour_text(s, fg=None, bg=None):
        # colour-less fallback: pass the text through unchanged
        return s
# Convenience single-colour wrappers around colour_text(), used by
# numpy.distutils log output.
def default_text(s):
    return colour_text(s, 'default')
def red_text(s):
    return colour_text(s, 'red')
def green_text(s):
    return colour_text(s, 'green')
def yellow_text(s):
    return colour_text(s, 'yellow')
def cyan_text(s):
    return colour_text(s, 'cyan')
def blue_text(s):
    return colour_text(s, 'blue')
#########################
def cyg2win32(path):
    """Translate a cygwin '/cygdrive/X/...' path to a native 'X:...' path.

    On any other platform, or for paths not starting with '/cygdrive',
    the path is returned unchanged.
    """
    if sys.platform == 'cygwin' and path.startswith('/cygdrive'):
        drive = path[10]          # the single drive letter after '/cygdrive/'
        tail = os.path.normcase(path[11:])
        return drive + ':' + tail
    return path
def mingw32():
    """Return true when using mingw32 environment.

    Detection is based on the msys OSTYPE / MINGW32 MSYSTEM environment
    variables, and only applies on win32.
    """
    if sys.platform != 'win32':
        return False
    return (os.environ.get('OSTYPE', '') == 'msys'
            or os.environ.get('MSYSTEM', '') == 'MINGW32')
def msvc_runtime_library():
    "Return name of MSVC runtime library if Python was built with MSVC >= 7"
    # The interpreter's compiler tag, e.g. '[MSC v.1500 ...]', is embedded
    # in sys.version on MSVC builds.
    pos = sys.version.find('MSC v.')
    if pos == -1:
        return None
    msc_ver = sys.version[pos + 6:pos + 10]
    return {'1300': 'msvcr70',    # MSVC 7.0
            '1310': 'msvcr71',    # MSVC 7.1
            '1400': 'msvcr80',    # MSVC 8
            '1500': 'msvcr90',    # MSVC 9 (VS 2008)
            }.get(msc_ver, None)
def msvc_on_amd64():
    """On 64-bit Windows MSVC builds, force DISTUTILS_USE_SDK=1.

    Works around the distutils _MSVCCompiler__root attribute error; a
    no-op everywhere else or when the variable is already set.
    """
    if sys.platform != 'win32' and os.name != 'nt':
        return
    if get_build_architecture() != 'AMD64':
        return
    if 'DISTUTILS_USE_SDK' in os.environ:
        return
    print('Forcing DISTUTILS_USE_SDK=1')
    os.environ['DISTUTILS_USE_SDK'] = '1'
    return
#########################

#XXX need support for .C that is also C++
# Case-insensitive extension classifiers used to sort sources by language.
cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z', re.I).match
f90_ext_match = re.compile(r'.*[.](f90|f95)\Z', re.I).match
# Captures the module name of a Fortran 'module <name>' statement.
f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
def _get_f90_modules(source):
    """Return a list of Fortran f90 module names that
    given source file defines.

    Non-f90/f95 files yield an empty list.
    """
    if not f90_ext_match(source):
        return []
    modules = []
    # `with` closes the handle even if reading raises; the original left
    # the file open on error (and used a Python-2-only xreadlines fallback).
    with open(source, 'r') as f:
        for line in f:
            m = f90_module_name_match(line)
            if m:
                name = m.group('name')
                modules.append(name)
                # break  # XXX can we assume that there is one module per file?
    return modules
def is_string(s):
    """Return True if *s* is a str instance."""
    return isinstance(s, str)
def all_strings(lst):
    """Return True if all items in lst are string objects. """
    # Generator form short-circuits on the first non-string item, exactly
    # like the original explicit loop.
    return all(is_string(item) for item in lst)
def is_sequence(seq):
    """Return True for sized, non-string sequence-like objects (anything
    that supports len() and is not a str)."""
    if is_string(seq):
        return False
    try:
        len(seq)
    except Exception:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit raised during len().
        return False
    return True
def is_glob_pattern(s):
    """Return True if *s* is a string containing a glob wildcard."""
    # BUG FIX: the original tested `'?' is s` (object identity), which is
    # effectively always False; the intent is substring containment, as
    # with '*'.
    return is_string(s) and ('*' in s or '?' in s)
def as_list(seq):
    """Return *seq* as a list: copy a sequence, box anything else."""
    return list(seq) if is_sequence(seq) else [seq]
def get_language(sources):
    # not used in numpy/scipy packages, use build_ext.detect_language instead
    """Determine language value (c,f77,f90) from sources """
    language = None
    for source in sources:
        if isinstance(source, str):
            if f90_ext_match(source):
                language = 'f90'
                break
            elif fortran_ext_match(source):
                # deliberately no break: a later .f90/.f95 source upgrades
                # the result to 'f90'
                language = 'f77'
    return language
def has_f_sources(sources):
    """Return True if sources contains Fortran files """
    # any() short-circuits on the first match, like the original loop.
    return any(fortran_ext_match(source) for source in sources)
def has_cxx_sources(sources):
    """Return True if sources contains C++ files """
    # any() short-circuits on the first match, like the original loop.
    return any(cxx_ext_match(source) for source in sources)
def filter_sources(sources):
    """Return four lists of filenames containing
    C, C++, Fortran, and Fortran 90 module sources,
    respectively.
    """
    c_sources = []
    cxx_sources = []
    f_sources = []
    fmodule_sources = []
    for source in sources:
        if fortran_ext_match(source):
            modules = _get_f90_modules(source)
            if modules:
                # defines f90 modules -> must be compiled before its users
                fmodule_sources.append(source)
            else:
                f_sources.append(source)
        elif cxx_ext_match(source):
            cxx_sources.append(source)
        else:
            # anything unrecognised is treated as C
            c_sources.append(source)
    return c_sources, cxx_sources, f_sources, fmodule_sources
def _get_headers(directory_list):
# get *.h files from list of directories
headers = []
for d in directory_list:
head = glob.glob(os.path.join(d,"*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
def _get_directories(list_of_sources):
# get unique directories from list of sources.
direcs = []
for f in list_of_sources:
d = os.path.split(f)
if d[0] != '' and not d[0] in direcs:
direcs.append(d[0])
return direcs
def get_dependencies(sources):
    #XXX scan sources for include statements
    # Crude approximation: every *.h file sitting next to a source is
    # treated as a dependency.
    return _get_headers(_get_directories(sources))
def is_local_src_dir(directory):
    """Return true if directory is local directory.

    "Local" means: an existing directory under the current working
    directory that is not inside a build/ tree.
    """
    if not is_string(directory):
        return False
    abs_dir = os.path.abspath(directory)
    c = os.path.commonprefix([os.getcwd(), abs_dir])
    # part of the path below the cwd (character-wise common prefix)
    new_dir = abs_dir[len(c):].split(os.sep)
    if new_dir and not new_dir[0]:
        new_dir = new_dir[1:]
    if new_dir and new_dir[0] == 'build':
        return False
    new_dir = os.sep.join(new_dir)
    return os.path.isdir(new_dir)
def general_source_files(top_path):
    """Yield every file under *top_path*, skipping VCS/build directories
    and editor/bytecode/object-file leftovers."""
    skip_dirs = {'CVS': 1, '.svn': 1, 'build': 1}
    junk = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
    for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
        # prune in place so os.walk does not descend into skipped dirs
        dirnames[:] = [d for d in dirnames if d not in skip_dirs]
        for fname in filenames:
            if not junk.search(fname):
                yield os.path.join(dirpath, fname)
def general_source_directories_files(top_path):
    """Return a directory name relative to top_path and
    files contained.
    """
    pruned_directories = ['CVS', '.svn', 'build']
    prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
    for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
        # prune in place so os.walk does not descend into VCS/build dirs
        pruned = [d for d in dirnames if d not in pruned_directories]
        dirnames[:] = pruned
        for d in dirnames:
            dpath = os.path.join(dirpath, d)
            rpath = rel_path(dpath, top_path)
            files = []
            for f in os.listdir(dpath):
                fn = os.path.join(dpath, f)
                if os.path.isfile(fn) and not prune_file_pat.search(fn):
                    files.append(fn)
            yield rpath, files
    # finally, the files sitting directly in top_path itself
    dpath = top_path
    rpath = rel_path(dpath, top_path)
    filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \
                 if not prune_file_pat.search(f)]
    files = [f for f in filenames if os.path.isfile(f)]
    yield rpath, files
def get_ext_source_files(ext):
    """Return the string sources of an Extension plus the headers found
    next to them and any local `depends` entries."""
    # Get sources and any include files in the same directory.
    filenames = []
    # list() so the filtered result can be iterated twice; on Python 3 a
    # bare filter() object is a one-shot iterator and get_dependencies()
    # would have seen it already exhausted.
    sources = list(filter(is_string, ext.sources))
    filenames.extend(sources)
    filenames.extend(get_dependencies(sources))
    for d in ext.depends:
        if is_local_src_dir(d):
            filenames.extend(list(general_source_files(d)))
        elif os.path.isfile(d):
            filenames.append(d)
    return filenames
def get_script_files(scripts):
    """Return the string entries of *scripts* as a list."""
    # Return a real list: on Python 3 the original `filter(...)` result
    # was a one-shot iterator, surprising callers that iterate it twice.
    return [s for s in scripts if is_string(s)]
def get_lib_source_files(lib):
    """Return the string sources of a (name, build_info) library tuple
    plus nearby headers and local `depends` entries."""
    filenames = []
    sources = lib[1].get('sources', [])
    # list() so the filtered sources survive being iterated twice; a bare
    # filter() object on Python 3 would be exhausted after the extend().
    sources = list(filter(is_string, sources))
    filenames.extend(sources)
    filenames.extend(get_dependencies(sources))
    depends = lib[1].get('depends', [])
    for d in depends:
        if is_local_src_dir(d):
            filenames.extend(list(general_source_files(d)))
        elif os.path.isfile(d):
            filenames.append(d)
    return filenames
def get_data_files(data):
    """Resolve a data_files entry -- a plain string or a
    (prefix, sources) tuple -- into a flat list of existing filenames."""
    if is_string(data):
        return [data]
    sources = data[1]
    filenames = []
    for s in sources:
        if hasattr(s, '__call__'):
            # generated at build time; cannot be resolved yet
            continue
        if is_local_src_dir(s):
            filenames.extend(list(general_source_files(s)))
        elif is_string(s):
            if os.path.isfile(s):
                filenames.append(s)
            else:
                print('Not existing data file:', s)
        else:
            raise TypeError(repr(s))
    return filenames
def dot_join(*args):
    """Join the non-empty arguments with '.'."""
    return '.'.join(filter(None, args))
def get_frame(level=0):
    """Return frame object from call stack with given level.
    """
    try:
        return sys._getframe(level+1)
    except AttributeError:
        # sys._getframe is CPython-specific; fall back to walking up from
        # the traceback frame of the AttributeError just raised.
        frame = sys.exc_info()[2].tb_frame
        for _ in range(level+1):
            frame = frame.f_back
        return frame
class SconsInfo(object):
    """
    Container object holding build info for building a package with scons.

    Parameters
    ----------
    scons_path : str or None
        Path to scons script, relative to the directory of setup.py.
        If None, no scons script is specified. This can be useful to add only
        pre- and post-hooks to a configuration.
    parent_name : str or None
        Name of the parent package (for example "numpy").
    pre_hook : sequence of callables or None
        Callables that are executed before scons is invoked.
        Each callable should be defined as ``callable(*args, **kw)``.
    post_hook : sequence of callables or None
        Callables that are executed after scons is invoked.
        Each callable should be defined as ``callable(*args, **kw)``.
    source_files : list of str or None
        List of paths to source files, relative to the directory of setup.py.
    pkg_path : str or None
        Path to the package for which the `SconsInfo` instance holds the
        build info, relative to the directory of setup.py.

    Notes
    -----
    The six parameters are stored as attributes with the same names; when
    ``pkg_path`` is falsy it defaults to the directory of ``scons_path``
    (or '' when no scons script is given either).
    """
    def __init__(self, scons_path, parent_name, pre_hook,
                 post_hook, source_files, pkg_path):
        self.scons_path = scons_path
        self.parent_name = parent_name
        self.pre_hook = pre_hook
        self.post_hook = post_hook
        self.source_files = source_files
        if pkg_path:
            self.pkg_path = pkg_path
        elif scons_path:
            self.pkg_path = os.path.dirname(scons_path)
        else:
            self.pkg_path = ''
######################
class Configuration(object):

    # Attribute names whose values are lists / dicts; __init__ seeds the
    # matching instance attributes from **attrs and todict() exports them.
    _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
                  'libraries', 'headers', 'scripts', 'py_modules', 'scons_data',
                  'installed_libraries']
    _dict_keys = ['package_dir', 'installed_pkg_config']
    _extra_keys = ['name', 'version']

    # Class-level accumulator of numpy include directories.
    numpy_include_dirs = []
    def __init__(self,
                 package_name=None,
                 parent_name=None,
                 top_path=None,
                 package_path=None,
                 caller_level=1,
                 setup_name='setup.py',
                 **attrs):
        """Construct configuration instance of a package.

        package_name -- name of the package
                        Ex.: 'distutils'
        parent_name -- name of the parent package
                       Ex.: 'numpy'
        top_path -- directory of the toplevel package
                    Ex.: the directory where the numpy package source sits
        package_path -- directory of package. Will be computed by magic from the
                        directory of the caller module if not specified
                        Ex.: the directory where numpy.distutils is
        caller_level -- frame level to caller namespace, internal parameter.
        """
        self.name = dot_join(parent_name, package_name)
        self.version = None

        caller_frame = get_frame(caller_level)
        self.local_path = get_path_from_frame(caller_frame, top_path)
        # local_path -- directory of a file (usually setup.py) that
        #               defines a configuration() function.
        if top_path is None:
            top_path = self.local_path
            self.local_path = ''
        if package_path is None:
            package_path = self.local_path
        elif os.path.isdir(njoin(self.local_path, package_path)):
            package_path = njoin(self.local_path, package_path)
        if not os.path.isdir(package_path or '.'):
            raise ValueError("%r is not a directory" % (package_path,))
        self.top_path = top_path
        self.package_path = package_path
        # this is the relative path in the installed package
        self.path_in_package = os.path.join(*self.name.split('.'))

        self.list_keys = self._list_keys[:]
        self.dict_keys = self._dict_keys[:]

        # seed list-valued attributes from **attrs (copied, wrapped in lists)
        for n in self.list_keys:
            v = copy.copy(attrs.get(n, []))
            setattr(self, n, as_list(v))

        # seed dict-valued attributes from **attrs (copied)
        for n in self.dict_keys:
            v = copy.copy(attrs.get(n, {}))
            setattr(self, n, v)

        # any remaining keyword becomes an ad-hoc attribute, classified by
        # its value type so todict() knows how to export it
        known_keys = self.list_keys + self.dict_keys
        self.extra_keys = self._extra_keys[:]
        for n in attrs.keys():
            if n in known_keys:
                continue
            a = attrs[n]
            setattr(self, n, a)
            if isinstance(a, list):
                self.list_keys.append(n)
            elif isinstance(a, dict):
                self.dict_keys.append(n)
            else:
                self.extra_keys.append(n)

        if os.path.exists(njoin(package_path, '__init__.py')):
            self.packages.append(self.name)
            self.package_dir[self.name] = package_path

        self.options = dict(
            ignore_setup_xxx_py = False,
            assume_default_configuration = False,
            delegate_options_to_subpackages = False,
            quiet = False,
            )

        # Look a few frames up the stack for a Configuration `self`; if that
        # caller delegates options to subpackages, inherit its options.
        caller_instance = None
        for i in range(1, 3):
            try:
                f = get_frame(i)
            except ValueError:
                break
            try:
                caller_instance = eval('self', f.f_globals, f.f_locals)
                break
            except NameError:
                pass
        if isinstance(caller_instance, self.__class__):
            if caller_instance.options['delegate_options_to_subpackages']:
                self.set_options(**caller_instance.options)

        self.setup_name = setup_name
    def todict(self):
        """
        Return a dictionary compatible with the keyword arguments of distutils
        setup function.

        Examples
        --------
        >>> setup(**config.todict())                           #doctest: +SKIP
        """

        self._optimize_data_files()
        d = {}
        known_keys = self.list_keys + self.dict_keys + self.extra_keys
        for n in known_keys:
            a = getattr(self, n)
            if a:
                # falsy values (empty lists/dicts, None) are dropped
                d[n] = a
        return d
    def info(self, message):
        """Print an informational message unless the `quiet` option is set."""
        if not self.options['quiet']:
            print(message)
def warn(self, message):
sys.stderr.write('Warning: %s' % (message,))
def set_options(self, **options):
"""
Configure Configuration instance.
The following options are available:
- ignore_setup_xxx_py
- assume_default_configuration
- delegate_options_to_subpackages
- quiet
"""
for key, value in options.items():
if key in self.options:
self.options[key] = value
else:
raise ValueError('Unknown option: '+key)
    def get_distribution(self):
        """Return the distutils distribution object for self."""
        # local import -- NOTE(review): presumably avoids a circular import
        # with numpy.distutils.core; confirm before moving to module level.
        from numpy.distutils.core import get_distribution
        return get_distribution()
    def _wildcard_get_subpackage(self, subpackage_name,
                                 parent_name,
                                 caller_level = 1):
        """Expand a wildcard subpackage name ('*' components) into the
        configurations of every matching directory that is a package."""
        l = subpackage_name.split('.')
        subpackage_path = njoin([self.local_path]+l)
        dirs = filter(os.path.isdir, glob.glob(subpackage_path))
        config_list = []
        for d in dirs:
            if not os.path.isfile(njoin(d, '__init__.py')):
                # not a python package; skip
                continue
            if 'build' in d.split(os.sep):
                # never pick up packages from a build tree
                continue
            # reconstruct the dotted name from the trailing path components
            n = '.'.join(d.split(os.sep)[-len(l):])
            c = self.get_subpackage(n,
                                    parent_name = parent_name,
                                    caller_level = caller_level+1)
            config_list.extend(c)
        return config_list
    def _get_configuration_from_setup_py(self, setup_py,
                                         subpackage_name,
                                         subpackage_path,
                                         parent_name,
                                         caller_level = 1):
        """Import *setup_py* as a module and build the subpackage
        configuration from its configuration() function, falling back to a
        default Configuration when it defines none."""
        # In case setup_py imports local modules:
        sys.path.insert(0, os.path.dirname(setup_py))
        try:
            # NOTE(review): fo_setup_py is only closed on the success path;
            # an exception from imp.load_module leaks the handle.
            fo_setup_py = open(setup_py, 'U')
            setup_name = os.path.splitext(os.path.basename(setup_py))[0]
            n = dot_join(self.name, subpackage_name, setup_name)
            setup_module = imp.load_module('_'.join(n.split('.')),
                                           fo_setup_py,
                                           setup_py,
                                           ('.py', 'U', 1))
            fo_setup_py.close()
            if not hasattr(setup_module, 'configuration'):
                if not self.options['assume_default_configuration']:
                    self.warn('Assuming default configuration '\
                              '(%s does not define configuration())'\
                              % (setup_module))
                config = Configuration(subpackage_name, parent_name,
                                       self.top_path, subpackage_path,
                                       caller_level = caller_level + 1)
            else:
                pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
                args = (pn,)
                # configuration() may optionally take top_path as a second
                # argument; the code object attribute holding the argument
                # count is named differently on Python 2 vs 3.
                def fix_args_py2(args):
                    if setup_module.configuration.func_code.co_argcount > 1:
                        args = args + (self.top_path,)
                    return args
                def fix_args_py3(args):
                    if setup_module.configuration.__code__.co_argcount > 1:
                        args = args + (self.top_path,)
                    return args
                if sys.version_info[0] < 3:
                    args = fix_args_py2(args)
                else:
                    args = fix_args_py3(args)
                config = setup_module.configuration(*args)
            if config.name != dot_join(parent_name, subpackage_name):
                self.warn('Subpackage %r configuration returned as %r' % \
                          (dot_join(parent_name, subpackage_name), config.name))
        finally:
            del sys.path[0]
        return config
    def get_subpackage(self, subpackage_name,
                       subpackage_path=None,
                       parent_name=None,
                       caller_level = 1):
        """Return list of subpackage configurations.

        Parameters
        ----------
        subpackage_name: str,None
            Name of the subpackage to get the configuration. '*' in
            subpackage_name is handled as a wildcard.
        subpackage_path: str
            If None, then the path is assumed to be the local path plus the
            subpackage_name. If a setup.py file is not found in the
            subpackage_path, then a default configuration is used.
        parent_name: str
            Parent name.
        """
        if subpackage_name is None:
            if subpackage_path is None:
                raise ValueError(
                    "either subpackage_name or subpackage_path must be specified")
            subpackage_name = os.path.basename(subpackage_path)

        # handle wildcards
        l = subpackage_name.split('.')
        if subpackage_path is None and '*' in subpackage_name:
            return self._wildcard_get_subpackage(subpackage_name,
                                                 parent_name,
                                                 caller_level = caller_level+1)
        assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
        if subpackage_path is None:
            subpackage_path = njoin([self.local_path] + l)
        else:
            # the last dotted component names the package inside the path
            subpackage_path = njoin([subpackage_path] + l[:-1])
            subpackage_path = self.paths([subpackage_path])[0]
        setup_py = njoin(subpackage_path, self.setup_name)
        if not self.options['ignore_setup_xxx_py']:
            if not os.path.isfile(setup_py):
                # fall back to the legacy setup_<name>.py naming
                setup_py = njoin(subpackage_path,
                                 'setup_%s.py' % (subpackage_name))
        if not os.path.isfile(setup_py):
            if not self.options['assume_default_configuration']:
                self.warn('Assuming default configuration '\
                          '(%s/{setup_%s,setup}.py was not found)' \
                          % (os.path.dirname(setup_py), subpackage_name))
            config = Configuration(subpackage_name, parent_name,
                                   self.top_path, subpackage_path,
                                   caller_level = caller_level+1)
        else:
            config = self._get_configuration_from_setup_py(
                setup_py,
                subpackage_name,
                subpackage_path,
                parent_name,
                caller_level = caller_level + 1)
        if config:
            return [config]
        else:
            return []
    def add_subpackage(self, subpackage_name,
                       subpackage_path=None,
                       standalone = False):
        """Add a sub-package to the current Configuration instance.

        This is useful in a setup.py script for adding sub-packages to a
        package.

        Parameters
        ----------
        subpackage_name: str
            name of the subpackage
        subpackage_path: str
            if given, the subpackage path such as the subpackage is in
            subpackage_path / subpackage_name. If None,the subpackage is
            assumed to be located in the local path / subpackage_name.
        standalone: bool
        """

        if standalone:
            parent_name = None
        else:
            parent_name = self.name
        config_list = self.get_subpackage(subpackage_name, subpackage_path,
                                          parent_name = parent_name,
                                          caller_level = 2)
        if not config_list:
            self.warn('No configuration returned, assuming unavailable.')
        for config in config_list:
            d = config
            if isinstance(config, Configuration):
                d = config.todict()
            assert isinstance(d, dict), repr(type(d))

            self.info('Appending %s configuration to %s'
                      % (d.get('name'), self.name))
            self.dict_append(**d)

        dist = self.get_distribution()
        if dist is not None:
            # setup() already ran; appending now may have no effect
            self.warn('distutils distribution has been initialized,'
                      ' it may be too late to add a subpackage '+
                      subpackage_name)
def add_data_dir(self,data_path):
"""Recursively add files under data_path to data_files list.
Recursively add files under data_path to the list of data_files to be
installed (and distributed). The data_path can be either a relative
path-name, or an absolute path-name, or a 2-tuple where the first
argument shows where in the install directory the data directory
should be installed to.
Parameters
----------
data_path: seq,str
Argument can be either
* 2-sequence (<datadir suffix>,<path to data directory>)
* path to data directory where python datadir suffix defaults
to package dir.
Notes
-----
Rules for installation paths:
foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
(gun, foo/bar) -> parent/gun
foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
(gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
(gun/*, foo/*) -> parent/gun/a, parent/gun/b
/foo/bar -> (bar, /foo/bar) -> parent/bar
(gun, /foo/bar) -> parent/gun
(fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
Examples
--------
For example suppose the source directory contains fun/foo.dat and
fun/bar/car.dat::
>>> self.add_data_dir('fun') #doctest: +SKIP
>>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
>>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
Will install data-files to the locations::
<package install directory>/
fun/
foo.dat
bar/
car.dat
sun/
foo.dat
bar/
car.dat
gun/
foo.dat
car.dat
"""
if is_sequence(data_path):
d, data_path = data_path
else:
d = None
if is_sequence(data_path):
[self.add_data_dir((d,p)) for p in data_path]
return
if not is_string(data_path):
raise TypeError("not a string: %r" % (data_path,))
if d is None:
if os.path.isabs(data_path):
return self.add_data_dir((os.path.basename(data_path),
data_path))
return self.add_data_dir((data_path, data_path))
paths = self.paths(data_path, include_non_existing=False)
if is_glob_pattern(data_path):
if is_glob_pattern(d):
pattern_list = allpath(d).split(os.sep)
pattern_list.reverse()
# /a/*//b/ -> /a/*/b
rl = range(len(pattern_list)-1); rl.reverse()
for i in rl:
if not pattern_list[i]:
del pattern_list[i]
#
for path in paths:
if not os.path.isdir(path):
print('Not a directory, skipping',path)
continue
rpath = rel_path(path, self.local_path)
path_list = rpath.split(os.sep)
path_list.reverse()
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
if i>=len(path_list):
raise ValueError('cannot fill pattern %r with %r' \
% (d, path))
target_list.append(path_list[i])
else:
assert s==path_list[i],repr((s,path_list[i],data_path,d,path,rpath))
target_list.append(s)
i += 1
if path_list[i:]:
self.warn('mismatch of pattern_list=%s and path_list=%s'\
% (pattern_list,path_list))
target_list.reverse()
self.add_data_dir((os.sep.join(target_list),path))
else:
for path in paths:
self.add_data_dir((d,path))
return
assert not is_glob_pattern(d),repr(d)
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
for path in paths:
for d1,f in list(general_source_directories_files(path)):
target_path = os.path.join(self.path_in_package,d,d1)
data_files.append((target_path, f))
def _optimize_data_files(self):
data_dict = {}
for p,files in self.data_files:
if p not in data_dict:
data_dict[p] = set()
for f in files:
data_dict[p].add(f)
self.data_files[:] = [(p,list(files)) for p,files in data_dict.items()]
    def add_data_files(self,*files):
        """Add data files to configuration data_files.

        Parameters
        ----------
        files: sequence
            Argument(s) can be either

                * 2-sequence (<datadir prefix>,<path to data file(s)>)
                * paths to data files where python datadir prefix defaults
                  to package dir.

        Notes
        -----
        The form of each element of the files sequence is very flexible
        allowing many combinations of where to get the files from the package
        and where they should ultimately be installed on the system. The most
        basic usage is for an element of the files argument sequence to be a
        simple filename. This will cause that file from the local path to be
        installed to the installation path of the self.name package (package
        path). The file argument can also be a relative path in which case the
        entire relative path will be installed into the package directory.
        Finally, the file can be an absolute path name in which case the file
        will be found at the absolute path name but installed to the package
        path.

        This basic behavior can be augmented by passing a 2-tuple in as the
        file argument. The first element of the tuple should specify the
        relative path (under the package install directory) where the
        remaining sequence of files should be installed to (it has nothing to
        do with the file-names in the source distribution). The second element
        of the tuple is the sequence of files that should be installed. The
        files in this sequence can be filenames, relative paths, or absolute
        paths. For absolute paths the file will be installed in the top-level
        package installation directory (regardless of the first argument).
        Filenames and relative path names will be installed in the package
        install directory under the path name given as the first element of
        the tuple.

        Rules for installation paths:

        #. file.txt -> (., file.txt)-> parent/file.txt
        #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
        #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
        #. *.txt -> parent/a.txt, parent/b.txt
        #. foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt
        #. */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt
        #. (sun, file.txt) -> parent/sun/file.txt
        #. (sun, bar/file.txt) -> parent/sun/file.txt
        #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
        #. (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt
        #. (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt
        #. (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt

        An additional feature is that the path to a data-file can actually be
        a function that takes no arguments and returns the actual path(s) to
        the data-files. This is useful when the data files are generated while
        building the package.

        Examples
        --------
        Add files to the list of data_files to be included with the package.

            >>> self.add_data_files('foo.dat',
            ...     ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
            ...     'bar/cat.dat',
            ...     '/full/path/to/can.dat')   #doctest: +SKIP

        will install these data files to::

            <package install directory>/
             foo.dat
             fun/
               gun.dat
               nun/
                 pun.dat
             sun.dat
             bar/
               car.dat
             can.dat

        where <package install directory> is the package (or sub-package)
        directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C:
        \\Python2.4 \\Lib \\site-packages \\mypackage') or
        '/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C:
        \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage').
        """
        # Multiple arguments: recurse once per argument.
        if len(files)>1:
            for f in files:
                self.add_data_files(f)
            return
        assert len(files)==1
        # A 2-sequence argument carries an explicit datadir prefix d.
        if is_sequence(files[0]):
            d,files = files[0]
        else:
            d = None
        if is_string(files):
            filepat = files
        elif is_sequence(files):
            if len(files)==1:
                filepat = files[0]
            else:
                # Sequence of files under the same prefix: recurse per file.
                for f in files:
                    self.add_data_files((d,f))
                return
        else:
            raise TypeError(repr(type(files)))
        if d is None:
            # Derive the default prefix: package dir for callables and
            # absolute paths, the file's own directory otherwise.
            if hasattr(filepat, '__call__'):
                d = ''
            elif os.path.isabs(filepat):
                d = ''
            else:
                d = os.path.dirname(filepat)
            self.add_data_files((d,files))
            return
        paths = self.paths(filepat, include_non_existing=False)
        if is_glob_pattern(filepat):
            if is_glob_pattern(d):
                # Both prefix and pattern contain wildcards: substitute
                # each wildcard component of d with the matching path
                # component (components are matched from the right).
                pattern_list = d.split(os.sep)
                pattern_list.reverse()
                for path in paths:
                    path_list = path.split(os.sep)
                    path_list.reverse()
                    path_list.pop() # filename
                    target_list = []
                    i = 0
                    for s in pattern_list:
                        if is_glob_pattern(s):
                            target_list.append(path_list[i])
                            i += 1
                        else:
                            target_list.append(s)
                    target_list.reverse()
                    self.add_data_files((os.sep.join(target_list), path))
            else:
                self.add_data_files((d,paths))
            return
        assert not is_glob_pattern(d),repr((d,filepat))
        # Register on the distribution when setup() already ran.
        dist = self.get_distribution()
        if dist is not None and dist.data_files is not None:
            data_files = dist.data_files
        else:
            data_files = self.data_files
        data_files.append((os.path.join(self.path_in_package,d),paths))
### XXX Implement add_py_modules
def add_include_dirs(self,*paths):
"""Add paths to configuration include directories.
Add the given sequence of paths to the beginning of the include_dirs
list. This list will be visible to all extension modules of the
current package.
"""
include_dirs = self.paths(paths)
dist = self.get_distribution()
if dist is not None:
if dist.include_dirs is None:
dist.include_dirs = []
dist.include_dirs.extend(include_dirs)
else:
self.include_dirs.extend(include_dirs)
def add_numarray_include_dirs(self):
import numpy.numarray.util as nnu
self.add_include_dirs(*nnu.get_numarray_include_dirs())
def add_headers(self,*files):
"""Add installable headers to configuration.
Add the given sequence of files to the beginning of the headers list.
By default, headers will be installed under <python-
include>/<self.name.replace('.','/')>/ directory. If an item of files
is a tuple, then its first argument specifies the actual installation
location relative to the <python-include> path.
Parameters
----------
files: str, seq
Argument(s) can be either:
* 2-sequence (<includedir suffix>,<path to header file(s)>)
* path(s) to header file(s) where python includedir suffix will
default to package name.
"""
headers = []
for path in files:
if is_string(path):
[headers.append((self.name,p)) for p in self.paths(path)]
else:
if not isinstance(path, (tuple, list)) or len(path) != 2:
raise TypeError(repr(path))
[headers.append((path[0],p)) for p in self.paths(path[1])]
dist = self.get_distribution()
if dist is not None:
if dist.headers is None:
dist.headers = []
dist.headers.extend(headers)
else:
self.headers.extend(headers)
def paths(self,*paths,**kws):
"""Apply glob to paths and prepend local_path if needed.
Applies glob.glob(...) to each path in the sequence (if needed) and
pre-pends the local_path if needed. Because this is called on all
source lists, this allows wildcard characters to be specified in lists
of sources for extension modules and libraries and scripts and allows
path-names be relative to the source directory.
"""
include_non_existing = kws.get('include_non_existing',True)
return gpaths(paths,
local_path = self.local_path,
include_non_existing=include_non_existing)
def _fix_paths_dict(self,kw):
for k in kw.keys():
v = kw[k]
if k in ['sources','depends','include_dirs','library_dirs',
'module_dirs','extra_objects']:
new_v = self.paths(v)
kw[k] = new_v
    def add_extension(self,name,sources,**kw):
        """Add extension to configuration.

        Create and add an Extension instance to the ext_modules list. This
        method also takes the following optional keyword arguments that are
        passed on to the Extension constructor.

        Parameters
        ----------
        name: str
            name of the extension
        sources: seq
            list of the sources. The list of sources may contain functions
            (called source generators) which must take an extension instance
            and a build directory as inputs and return a source file or list
            of source files or None. If None is returned then no sources are
            generated. If the Extension instance has no sources after
            processing all source generators, then no extension module is
            built.
        include_dirs:
        define_macros:
        undef_macros:
        library_dirs:
        libraries:
        runtime_library_dirs:
        extra_objects:
        extra_compile_args:
        extra_link_args:
        export_symbols:
        swig_opts:
        depends:
            The depends list contains paths to files or directories that the
            sources of the extension module depend on. If any path in the
            depends list is newer than the extension module, then the module
            will be rebuilt.
        language:
        f2py_options:
        module_dirs:
        extra_info: dict,list
            dict or list of dict of keywords to be appended to keywords.

        Notes
        -----
        The self.paths(...) method is applied to all lists that may contain
        paths.
        """
        ext_args = copy.copy(kw)
        ext_args['name'] = dot_join(self.name, name)
        ext_args['sources'] = sources
        # Fold the extra_info dict(s) into the Extension keyword arguments.
        if 'extra_info' in ext_args:
            extra_info = ext_args['extra_info']
            del ext_args['extra_info']
            if isinstance(extra_info, dict):
                extra_info = [extra_info]
            for info in extra_info:
                assert isinstance(info, dict), repr(info)
                dict_append(ext_args,**info)
        self._fix_paths_dict(ext_args)
        # Resolve out-of-tree dependencies
        libraries = ext_args.get('libraries',[])
        libnames = []
        ext_args['libraries'] = []
        for libname in libraries:
            if isinstance(libname, tuple):
                self._fix_paths_dict(libname[1])
            # Handle library names of the form libname@relative/path/to/library:
            # look up the named library in the sub-configuration living at
            # that path and merge its build keywords into this extension.
            if '@' in libname:
                lname, lpath = libname.split('@',1)
                lpath = os.path.abspath(njoin(self.local_path, lpath))
                if os.path.isdir(lpath):
                    c = self.get_subpackage(None, lpath, caller_level=2)
                    if isinstance(c, Configuration):
                        c = c.todict()
                    for l in [l[0] for l in c.get('libraries',[])]:
                        llname = l.split('__OF__',1)[0]
                        if llname == lname:
                            c.pop('name',None)
                            dict_append(ext_args,**c)
                            break
                    continue
            libnames.append(libname)
        ext_args['libraries'] = libnames + ext_args['libraries']
        from numpy.distutils.core import Extension
        ext = Extension(**ext_args)
        self.ext_modules.append(ext)
        dist = self.get_distribution()
        if dist is not None:
            # At this point setup() has already consumed the configuration,
            # so the new extension may be silently ignored.
            self.warn('distutils distribution has been initialized,'
                      ' it may be too late to add an extension ' + name)
        return ext
def add_library(self,name,sources,**build_info):
"""
Add library to configuration.
Parameters
----------
name : str
Name of the extension.
sources : sequence
List of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* f2py_options
* language
"""
self._add_library(name, sources, None, build_info)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a library '+ name)
def _add_library(self, name, sources, install_dir, build_info):
"""Common implementation for add_library and add_installed_library. Do
not use directly"""
build_info = copy.copy(build_info)
name = name #+ '__OF__' + self.name
build_info['sources'] = sources
# Sometimes, depends is not set up to an empty list by default, and if
# depends is not given to add_library, distutils barfs (#1134)
if not 'depends' in build_info:
build_info['depends'] = []
self._fix_paths_dict(build_info)
# Add to libraries list so that it is build with build_clib
self.libraries.append((name, build_info))
def add_installed_library(self, name, sources, install_dir, build_info=None):
"""
Similar to add_library, but the specified library is installed.
Most C libraries used with `distutils` are only used to build python
extensions, but libraries built through this method will be installed
so that they can be reused by third-party packages.
Parameters
----------
name : str
Name of the installed library.
sources : sequence
List of the library's source files. See `add_library` for details.
install_dir : str
Path to install the library, relative to the current sub-package.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* f2py_options
* language
Returns
-------
None
See Also
--------
add_library, add_npy_pkg_config, get_info
Notes
-----
The best way to encode the options required to link against the specified
C libraries is to use a "libname.ini" file, and use `get_info` to
retrieve the required options (see `add_npy_pkg_config` for more
information).
"""
if not build_info:
build_info = {}
install_dir = os.path.join(self.package_path, install_dir)
self._add_library(name, sources, install_dir, build_info)
self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
"""
Generate and install a npy-pkg config file from a template.
The config file generated from `template` is installed in the
given install directory, using `subst_dict` for variable substitution.
Parameters
----------
template : str
The path of the template, relatively to the current package path.
install_dir : str
Where to install the npy-pkg config file, relatively to the current
package path.
subst_dict : dict, optional
If given, any string of the form ``@key@`` will be replaced by
``subst_dict[key]`` in the template file when installed. The install
prefix is always available through the variable ``@prefix@``, since the
install prefix is not easy to get reliably from setup.py.
See also
--------
add_installed_library, get_info
Notes
-----
This works for both standard installs and in-place builds, i.e. the
``@prefix@`` refer to the source directory for in-place builds.
Examples
--------
::
config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})
Assuming the foo.ini.in file has the following content::
[meta]
Name=@foo@
Version=1.0
Description=dummy description
[default]
Cflags=-I@prefix@/include
Libs=
The generated file will have the following content::
[meta]
Name=bar
Version=1.0
Description=dummy description
[default]
Cflags=-Iprefix_dir/include
Libs=
and will be installed as foo.ini in the 'lib' subpath.
"""
if subst_dict is None:
subst_dict = {}
basename = os.path.splitext(template)[0]
template = os.path.join(self.package_path, template)
if self.name in self.installed_pkg_config:
self.installed_pkg_config[self.name].append((template, install_dir,
subst_dict))
else:
self.installed_pkg_config[self.name] = [(template, install_dir,
subst_dict)]
def add_scons_installed_library(self, name, install_dir):
"""
Add a scons-built installable library to distutils.
Parameters
----------
name : str
The name of the library.
install_dir : str
Path to install the library, relative to the current sub-package.
"""
install_dir = os.path.join(self.package_path, install_dir)
self.installed_libraries.append(InstallableLib(name, {}, install_dir))
def add_sconscript(self, sconscript, subpackage_path=None,
standalone = False, pre_hook = None,
post_hook = None, source_files = None, package_path=None):
"""Add a sconscript to configuration.
pre_hook and post hook should be sequences of callable, which will be
use before and after executing scons. The callable should be defined as
callable(*args, **kw). It is ugly, but well, hooks are ugly anyway...
sconscript can be None, which can be useful to add only post/pre
hooks."""
if standalone:
parent_name = None
else:
parent_name = self.name
dist = self.get_distribution()
# Convert the sconscript name to a relative filename (relative from top
# setup.py's directory)
fullsconsname = self.paths(sconscript)[0]
# XXX: Think about a way to automatically register source files from
# scons...
full_source_files = []
if source_files:
full_source_files.extend([self.paths(i)[0] for i in source_files])
scons_info = SconsInfo(fullsconsname, parent_name,
pre_hook, post_hook,
full_source_files, package_path)
if dist is not None:
if dist.scons_data is None:
dist.scons_data = []
dist.scons_data.append(scons_info)
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a subpackage '+ subpackage_name)
# XXX: we add a fake extension, to correctly initialize some
# options in distutils command.
dist.add_extension('', sources = [])
else:
self.scons_data.append(scons_info)
# XXX: we add a fake extension, to correctly initialize some
# options in distutils command.
self.add_extension('', sources = [])
def add_scripts(self,*files):
"""Add scripts to configuration.
Add the sequence of files to the beginning of the scripts list.
Scripts will be installed under the <prefix>/bin/ directory.
"""
scripts = self.paths(files)
dist = self.get_distribution()
if dist is not None:
if dist.scripts is None:
dist.scripts = []
dist.scripts.extend(scripts)
else:
self.scripts.extend(scripts)
    def dict_append(self,**dict):
        """Merge keyword items into this configuration.

        Known list-valued keys are extended and dict-valued keys updated;
        any other key is inherited as an extra attribute (with a warning)
        unless it was already inherited, in which case the attempt is
        logged and ignored.
        """
        for key in self.list_keys:
            a = getattr(self,key)
            a.extend(dict.get(key,[]))
        for key in self.dict_keys:
            a = getattr(self,key)
            a.update(dict.get(key,{}))
        known_keys = self.list_keys + self.dict_keys + self.extra_keys
        for key in dict.keys():
            if key not in known_keys:
                # Skip when the attribute already holds an equal value.
                a = getattr(self, key, None)
                if a and a==dict[key]: continue
                self.warn('Inheriting attribute %r=%r from %r' \
                          % (key,dict[key],dict.get('name','?')))
                setattr(self,key,dict[key])
                self.extra_keys.append(key)
            elif key in self.extra_keys:
                self.info('Ignoring attempt to set %r (from %r to %r)' \
                          % (key, getattr(self,key), dict[key]))
            elif key in known_keys:
                # key is already processed above
                pass
            else:
                # NOTE(review): unreachable -- the first branch already
                # handles every key not in known_keys.
                raise ValueError("Don't know about key=%r" % (key))
def __str__(self):
from pprint import pformat
known_keys = self.list_keys + self.dict_keys + self.extra_keys
s = '<'+5*'-' + '\n'
s += 'Configuration of '+self.name+':\n'
known_keys.sort()
for k in known_keys:
a = getattr(self,k,None)
if a:
s += '%s = %s\n' % (k,pformat(a))
s += 5*'-' + '>'
return s
def get_config_cmd(self):
"""
Returns the numpy.distutils config command instance.
"""
cmd = get_cmd('config')
cmd.ensure_finalized()
cmd.dump_source = 0
cmd.noisy = 0
old_path = os.environ.get('PATH')
if old_path:
path = os.pathsep.join(['.',old_path])
os.environ['PATH'] = path
return cmd
def get_build_temp_dir(self):
"""
Return a path to a temporary directory where temporary files should be
placed.
"""
cmd = get_cmd('build')
cmd.ensure_finalized()
return cmd.build_temp
    def have_f77c(self):
        """Check for availability of Fortran 77 compiler.

        Use it inside source generating function to ensure that
        setup distribution instance has been initialized.

        Notes
        -----
        True if a Fortran 77 compiler is available (because a simple Fortran 77
        code was able to be compiled successfully).
        """
        # NOTE: the literal's layout matters -- it is compiled as
        # fixed-form Fortran 77 source.
        simple_fortran_subroutine = '''
        subroutine simple
        end
        '''
        config_cmd = self.get_config_cmd()
        flag = config_cmd.try_compile(simple_fortran_subroutine,lang='f77')
        return flag
    def have_f90c(self):
        """Check for availability of Fortran 90 compiler.

        Use it inside source generating function to ensure that
        setup distribution instance has been initialized.

        Notes
        -----
        True if a Fortran 90 compiler is available (because a simple Fortran
        90 code was able to be compiled successfully)
        """
        # NOTE: the literal's layout matters -- it is handed to the
        # Fortran 90 compiler as-is.
        simple_fortran_subroutine = '''
        subroutine simple
        end
        '''
        config_cmd = self.get_config_cmd()
        flag = config_cmd.try_compile(simple_fortran_subroutine,lang='f90')
        return flag
def append_to(self, extlib):
"""Append libraries, include_dirs to extension or library item.
"""
if is_sequence(extlib):
lib_name, build_info = extlib
dict_append(build_info,
libraries=self.libraries,
include_dirs=self.include_dirs)
else:
from numpy.distutils.core import Extension
assert isinstance(extlib,Extension), repr(extlib)
extlib.libraries.extend(self.libraries)
extlib.include_dirs.extend(self.include_dirs)
def _get_svn_revision(self,path):
"""Return path's SVN revision number.
"""
revision = None
m = None
cwd = os.getcwd()
try:
os.chdir(path or '.')
p = subprocess.Popen(['svnversion'], shell=True,
stdout=subprocess.PIPE, stderr=None,
close_fds=True)
sout = p.stdout
m = re.match(r'(?P<revision>\d+)', sout.read())
except:
pass
os.chdir(cwd)
if m:
revision = int(m.group('revision'))
return revision
if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK',None):
entries = njoin(path,'_svn','entries')
else:
entries = njoin(path,'.svn','entries')
if os.path.isfile(entries):
f = open(entries)
fstr = f.read()
f.close()
if fstr[:5] == '<?xml': # pre 1.4
m = re.search(r'revision="(?P<revision>\d+)"',fstr)
if m:
revision = int(m.group('revision'))
else: # non-xml entries file --- check to be sure that
m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
if m:
revision = int(m.group('revision'))
return revision
def _get_hg_revision(self,path):
"""Return path's Mercurial revision number.
"""
revision = None
m = None
cwd = os.getcwd()
try:
os.chdir(path or '.')
p = subprocess.Popen(['hg identify --num'], shell=True,
stdout=subprocess.PIPE, stderr=None,
close_fds=True)
sout = p.stdout
m = re.match(r'(?P<revision>\d+)', sout.read())
except:
pass
os.chdir(cwd)
if m:
revision = int(m.group('revision'))
return revision
branch_fn = njoin(path,'.hg','branch')
branch_cache_fn = njoin(path,'.hg','branch.cache')
if os.path.isfile(branch_fn):
branch0 = None
f = open(branch_fn)
revision0 = f.read().strip()
f.close()
branch_map = {}
for line in file(branch_cache_fn, 'r'):
branch1, revision1 = line.split()[:2]
if revision1==revision0:
branch0 = branch1
try:
revision1 = int(revision1)
except ValueError:
continue
branch_map[branch1] = revision1
revision = branch_map.get(branch0)
return revision
def get_version(self, version_file=None, version_variable=None):
"""Try to get version string of a package.
Return a version string of the current package or None if the version
information could not be detected.
Notes
-----
This method scans files named
__version__.py, <packagename>_version.py, version.py, and
__svn_version__.py for string variables version, __version\__, and
<packagename>_version, until a version number is found.
"""
version = getattr(self,'version',None)
if version is not None:
return version
# Get version from version file.
if version_file is None:
files = ['__version__.py',
self.name.split('.')[-1]+'_version.py',
'version.py',
'__svn_version__.py',
'__hg_version__.py']
else:
files = [version_file]
if version_variable is None:
version_vars = ['version',
'__version__',
self.name.split('.')[-1]+'_version']
else:
version_vars = [version_variable]
for f in files:
fn = njoin(self.local_path,f)
if os.path.isfile(fn):
info = (open(fn),fn,('.py','U',1))
name = os.path.splitext(os.path.basename(fn))[0]
n = dot_join(self.name,name)
try:
version_module = imp.load_module('_'.join(n.split('.')),*info)
except ImportError:
msg = get_exception()
self.warn(str(msg))
version_module = None
if version_module is None:
continue
for a in version_vars:
version = getattr(version_module,a,None)
if version is not None:
break
if version is not None:
break
if version is not None:
self.version = version
return version
# Get version as SVN or Mercurial revision number
revision = self._get_svn_revision(self.local_path)
if revision is None:
revision = self._get_hg_revision(self.local_path)
if revision is not None:
version = str(revision)
self.version = version
return version
    def make_svn_version_py(self, delete=True):
        """Appends a data function to the data_files list that will generate
        __svn_version__.py file to the current package directory.

        Generate package __svn_version__.py file from SVN revision number,
        it will be removed after python exits but will be available
        when sdist, etc commands are executed.

        Notes
        -----
        If __svn_version__.py existed before, nothing is done.

        This is
        intended for working with source directories that are in an SVN
        repository.
        """
        target = njoin(self.local_path,'__svn_version__.py')
        revision = self._get_svn_revision(self.local_path)
        if os.path.isfile(target) or revision is None:
            return
        else:
            def generate_svn_version_py():
                # NOTE: 'revision' was captured above, at registration
                # time; it is not re-read when this data function runs.
                if not os.path.isfile(target):
                    version = str(revision)
                    self.info('Creating %s (version=%r)' % (target,version))
                    f = open(target,'w')
                    f.write('version = %r\n' % (version))
                    f.close()

                import atexit
                def rm_file(f=target,p=self.info):
                    # Clean up the generated module (and its .pyc) at exit,
                    # unless the caller asked to keep it (delete=False).
                    if delete:
                        try: os.remove(f); p('removed '+f)
                        except OSError: pass
                        try: os.remove(f+'c'); p('removed '+f+'c')
                        except OSError: pass

                atexit.register(rm_file)
                return target

            self.add_data_files(('', generate_svn_version_py()))
    def make_hg_version_py(self, delete=True):
        """Appends a data function to the data_files list that will generate
        __hg_version__.py file to the current package directory.

        Generate package __hg_version__.py file from Mercurial revision,
        it will be removed after python exits but will be available
        when sdist, etc commands are executed.

        Notes
        -----
        If __hg_version__.py existed before, nothing is done.

        This is intended for working with source directories that are
        in an Mercurial repository.
        """
        target = njoin(self.local_path,'__hg_version__.py')
        revision = self._get_hg_revision(self.local_path)
        if os.path.isfile(target) or revision is None:
            return
        else:
            def generate_hg_version_py():
                # NOTE: 'revision' was captured above, at registration
                # time; it is not re-read when this data function runs.
                if not os.path.isfile(target):
                    version = str(revision)
                    self.info('Creating %s (version=%r)' % (target,version))
                    f = open(target,'w')
                    f.write('version = %r\n' % (version))
                    f.close()

                import atexit
                def rm_file(f=target,p=self.info):
                    # Clean up the generated module (and its .pyc) at exit,
                    # unless the caller asked to keep it (delete=False).
                    if delete:
                        try: os.remove(f); p('removed '+f)
                        except OSError: pass
                        try: os.remove(f+'c'); p('removed '+f+'c')
                        except OSError: pass

                atexit.register(rm_file)
                return target

            self.add_data_files(('', generate_hg_version_py()))
def make_config_py(self,name='__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
This file is installed to the
package installation directory.
"""
self.py_modules.append((self.name,name,generate_config_py))
def scons_make_config_py(self, name = '__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
"""
self.py_modules.append((self.name, name, scons_generate_config_py))
def get_info(self,*names):
"""Get resources information.
Return information (from system_info.get_info) for all of the names in
the argument list in a single dictionary.
"""
from system_info import get_info, dict_append
info_dict = {}
for a in names:
dict_append(info_dict,**get_info(a))
return info_dict
def get_cmd(cmdname, _cache={}):
    """Return (and memoize) the distutils command object *cmdname* from
    the running setup() distribution.

    Raises DistutilsInternalError when no distribution has been
    initialized yet.
    """
    try:
        return _cache[cmdname]
    except KeyError:
        pass
    import distutils.core
    dist = distutils.core._setup_distribution
    if dist is None:
        from distutils.errors import DistutilsInternalError
        raise DistutilsInternalError(
              'setup distribution instance not initialized')
    cmd = dist.get_command_obj(cmdname)
    _cache[cmdname] = cmd
    return cmd
def get_numpy_include_dirs():
    """Return the numpy include directories.

    Configuration.numpy_include_dirs is populated by numpy/core/setup.py
    during numpy's own build; when it is empty we are not building numpy
    itself and can simply ask the installed numpy.
    """
    include_dirs = list(Configuration.numpy_include_dirs)
    if include_dirs:
        return include_dirs
    import numpy
    return [numpy.get_include()]
def get_npy_pkg_dir():
    """Return the path where to find the npy-pkg-config directory."""
    # XXX: import here for bootstrapping reasons
    import numpy
    return os.path.join(os.path.dirname(numpy.__file__),
                        'core', 'lib', 'npy-pkg-config')
def get_pkg_info(pkgname, dirs=None):
    """
    Return library info for the given package.

    Parameters
    ----------
    pkgname : str
        Name of the package (should match the name of the .ini file, without
        the extension, e.g. foo for the file foo.ini).
    dirs : sequence, optional
        If given, should be a sequence of additional directories where to look
        for npy-pkg-config files. Those directories are searched prior to the
        NumPy directory.

    Returns
    -------
    pkginfo : class instance
        The `LibraryInfo` instance containing the build information.

    Raises
    ------
    PkgNotFound
        If the package is not found.

    See Also
    --------
    Configuration.add_npy_pkg_config, Configuration.add_installed_library,
    get_info
    """
    from numpy.distutils.npy_pkg_config import read_config
    # BUG FIX: do not mutate the caller's 'dirs' list; build a fresh one.
    if dirs:
        dirs = list(dirs) + [get_npy_pkg_dir()]
    else:
        dirs = [get_npy_pkg_dir()]
    return read_config(pkgname, dirs)
def get_info(pkgname, dirs=None):
    """
    Return an info dict for a given C library.

    The info dict contains the options necessary to use the C library
    (suitable for the ``extra_info`` argument of
    ``Configuration.add_extension``).

    Parameters
    ----------
    pkgname : str
        Name of the package (should match the name of the .ini file,
        without the extension, e.g. foo for the file foo.ini).
    dirs : sequence, optional
        Additional directories searched for npy-pkg-config files, prior
        to the NumPy directory.

    Returns
    -------
    info : dict
        The dictionary with build information.

    Raises
    ------
    PkgNotFound
        If the package is not found.

    See Also
    --------
    Configuration.add_npy_pkg_config, Configuration.add_installed_library,
    get_pkg_info
    """
    from numpy.distutils.npy_pkg_config import parse_flags
    pkg_info = get_pkg_info(pkgname, dirs)
    # Translate the LibraryInfo instance into a build_info dict: start
    # from the cflags and merge in the link flags.
    info = parse_flags(pkg_info.cflags())
    for key, values in parse_flags(pkg_info.libs()).items():
        info[key].extend(values)
    # add_extension's extra_info argument expects 'define_macros', not
    # 'macros', and no 'ignored' key.
    info['define_macros'] = info.pop('macros')
    info.pop('ignored')
    return info
def is_bootstrapping():
    """Return True when numpy's own setup.py is running (it plants a
    __NUMPY_SETUP__ flag in the builtins namespace)."""
    import __builtin__
    return hasattr(__builtin__, '__NUMPY_SETUP__')

__NUMPY_SETUP__ = False
def scons_generate_config_py(target):
    """generate config.py file containing system_info information
    used during building the package.

    usage:
        config['py_modules'].append((packagename, '__config__',generate_config_py))
    """
    from distutils.dir_util import mkpath
    from numscons import get_scons_configres_dir, get_scons_configres_filename
    d = {}
    mkpath(os.path.dirname(target))
    f = open(target, 'w')
    f.write('# this file is generated by %s\n' % (os.path.abspath(sys.argv[0])))
    f.write('# it contains system_info results at the time of building this package.\n')
    f.write('__all__ = ["show"]\n\n')
    confdir = get_scons_configres_dir()
    confilename = get_scons_configres_filename()
    for root, dirs, files in os.walk(confdir):
        if files:
            file = os.path.join(root, confilename)
            assert root.startswith(confdir)
            # Package name is the path below confdir, dots for separators.
            pkg_name = '.'.join(root[len(confdir)+1:].split(os.sep))
            fid = open(file, 'r')
            try:
                cnt = fid.read()
                # NOTE(review): eval() of a build-generated file -- fine
                # for trusted build output, never for untrusted input.
                d[pkg_name] = eval(cnt)
            finally:
                fid.close()
    # d is a dictionary whose keys are package names, and values the
    # corresponding configuration. Each configuration is itself a dictionary
    # (lib : libinfo)
    f.write('_config = %s\n' % d)
    f.write(r'''
def show():
    for pkg, config in _config.items():
        print("package %s configuration:" % pkg)
        for lib, libc in config.items():
            print(' %s' % lib)
            for line in libc.split('\n'):
                print('\t%s' % line)
''')
    f.close()
    return target
#########################
def default_config_dict(name=None, parent_name=None, local_path=None):
    """Deprecated: return a configuration dictionary for use in a
    setup_<name>.py configuration() function.

    Use Configuration(...) directly instead; a DeprecationWarning-style
    message is emitted on every call.
    """
    import warnings
    warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\
                  'deprecated default_config_dict(%r,%r,%r)'
                  % (name, parent_name, local_path,
                     name, parent_name, local_path,
                     ))
    return Configuration(name, parent_name, local_path).todict()
def dict_append(d, **kws):
    """Merge keyword items into dict *d* in place.

    New keys are simply set; existing string values are replaced;
    any other existing value is assumed to be a list and extended.
    """
    for key, value in kws.items():
        if key not in d:
            d[key] = value
        elif isinstance(d[key], str):
            d[key] = value
        else:
            d[key].extend(value)
def appendpath(prefix, path):
    """Join *path* under *prefix*, normalizing separators.

    If *path* is absolute, only its part below the common prefix with
    os.path.abspath(prefix) is appended (keeping prefix's drive on
    Windows); otherwise *path* is appended as-is.
    """
    if os.path.sep != '/':
        prefix = prefix.replace('/', os.path.sep)
        path = path.replace('/', os.path.sep)
    drive = ''
    if os.path.isabs(path):
        drive = os.path.splitdrive(prefix)[0]
        absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]
        pathdrive, path = os.path.splitdrive(path)
        d = os.path.commonprefix([absprefix, path])
        if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \
           or os.path.join(path[:len(d)], path[len(d):]) != path:
            # Handle invalid paths: commonprefix is per-character and may
            # cut inside a path component; back off to the last full dir.
            d = os.path.dirname(d)
        subpath = path[len(d):]
        if os.path.isabs(subpath):
            subpath = subpath[1:]
    else:
        subpath = path
    return os.path.normpath(njoin(drive + prefix, subpath))
def generate_config_py(target):
    """Generate config.py file containing system_info information
    used during building the package.

    Usage:
        config['py_modules'].append((packagename, '__config__',generate_config_py))
    """
    from numpy.distutils.system_info import system_info
    from distutils.dir_util import mkpath
    mkpath(os.path.dirname(target))
    f = open(target, 'w')
    f.write('# This file is generated by %s\n' % (os.path.abspath(sys.argv[0])))
    f.write('# It contains system_info results at the time of building this package.\n')
    f.write('__all__ = ["get_info","show"]\n\n')
    # Dump every saved system_info result as a module-level assignment.
    for k, i in system_info.saved_results.items():
        f.write('%s=%r\n' % (k, i))
    # Append helper functions so the generated module is self-contained.
    f.write(r'''
def get_info(name):
    g = globals()
    return g.get(name, g.get(name + "_info", {}))

def show():
    for name,info_dict in globals().items():
        if name[0] == "_" or type(info_dict) is not type({}): continue
        print(name + ":")
        if not info_dict:
            print(" NOT AVAILABLE")
        for k,v in info_dict.items():
            v = str(v)
            if k == "sources" and len(v) > 200:
                v = v[:60] + " ...\n... " + v[-60:]
            print(" %s = %s" % (k,v))
''')
    f.close()
    return target
def msvc_version(compiler):
    """Return version major and minor of compiler instance if it is
    MSVC, raise an exception otherwise."""
    if compiler.compiler_type != "msvc":
        raise ValueError("Compiler instance is not msvc (%s)"\
                         % compiler.compiler_type)
    return compiler._MSVCCompiler__version
# Executed at import time: choose an implementation of
# get_build_architecture based on the running Python version.
# NOTE(review): sys.version[:3] >= '2.5' is the historical py2 idiom for
# version checks; it happens to also hold on 3.x ("3.x" > "2.5").
if sys.version[:3] >= '2.5':
    def get_build_architecture():
        # Delegate to distutils, which provides this since Python 2.5.
        from distutils.msvccompiler import get_build_architecture
        return get_build_architecture()
else:
    # Copied from python 2.5.1 distutils/msvccompiler.py
    def get_build_architecture():
        """Return the processor architecture.

        Possible results are "Intel", "Itanium", or "AMD64".
        """
        prefix = " bit ("
        i = sys.version.find(prefix)
        if i == -1:
            return "Intel"
        j = sys.version.find(")", i)
        return sys.version[i+len(prefix):j]
| gpl-3.0 |
natanielruiz/android-yolo | jni-build/jni/include/tensorflow/contrib/distributions/python/kernel_tests/mvn_test.py | 4 | 16522 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
import tensorflow as tf
distributions = tf.contrib.distributions
class MultivariateNormalShapeTest(tf.test.TestCase):
  """Checks pdf/log_pdf batch shapes when mu/sigma shapes are not static."""

  def _testPDFShapes(self, mvn_dist, mu, sigma):
    """Build `mvn_dist(mu, sigma)` from placeholders, feed concrete values,
    and verify the evaluated (log_)pdf has batch shape (3, 3); entry (0, 0)
    is cross-checked against scipy."""
    with self.test_session() as sess:
      mvn = mvn_dist(mu, sigma)
      x = 2 * tf.ones_like(mu)
      log_pdf = mvn.log_pdf(x)
      pdf = mvn.pdf(x)

      # Concrete values: batch (3, 3), event dim 2, identity covariance.
      mu_value = np.ones([3, 3, 2])
      sigma_value = np.zeros([3, 3, 2, 2])
      sigma_value[:] = np.identity(2)
      x_value = 2. * np.ones([3, 3, 2])
      feed_dict = {mu: mu_value, sigma: sigma_value}
      scipy_mvn = stats.multivariate_normal(mean=mu_value[(0, 0)],
                                            cov=sigma_value[(0, 0)])
      expected_log_pdf = scipy_mvn.logpdf(x_value[(0, 0)])
      expected_pdf = scipy_mvn.pdf(x_value[(0, 0)])

      log_pdf_evaled, pdf_evaled = sess.run([log_pdf, pdf], feed_dict=feed_dict)
      # The event dimension is reduced away, leaving the (3, 3) batch shape.
      self.assertAllEqual([3, 3], log_pdf_evaled.shape)
      self.assertAllEqual([3, 3], pdf_evaled.shape)
      self.assertAllClose(expected_log_pdf, log_pdf_evaled[0, 0])
      self.assertAllClose(expected_pdf, pdf_evaled[0, 0])

  def testPDFUnknownSize(self):
    # Ranks are known (3 for mu, 4 for sigma) but each dimension size is None.
    mu = tf.placeholder(tf.float32, shape=(3 * [None]))
    sigma = tf.placeholder(tf.float32, shape=(4 * [None]))
    self._testPDFShapes(distributions.MultivariateNormalFull, mu, sigma)
    self._testPDFShapes(distributions.MultivariateNormalCholesky, mu, sigma)

  def testPDFUnknownShape(self):
    # Even the rank is unknown at graph construction time.
    mu = tf.placeholder(tf.float32)
    sigma = tf.placeholder(tf.float32)
    self._testPDFShapes(distributions.MultivariateNormalFull, mu, sigma)
    self._testPDFShapes(distributions.MultivariateNormalCholesky, mu, sigma)
class MultivariateNormalDiagTest(tf.test.TestCase):
  """Well tested because this is a simple override of the base class."""

  def setUp(self):
    # Fixed seed keeps the randomized tests deterministic.
    self._rng = np.random.RandomState(42)

  def testMean(self):
    """mean() returns mu unchanged."""
    mu = [-1.0, 1.0]
    diag = [1.0, 5.0]
    with self.test_session():
      dist = distributions.MultivariateNormalDiag(mu, diag)
      self.assertAllEqual(mu, dist.mean().eval())

  def testEntropy(self):
    """entropy() matches scipy; note the comparison uses cov = diag**2."""
    mu = [-1.0, 1.0]
    diag = [1.0, 5.0]
    diag_mat = np.diag(diag)
    scipy_mvn = stats.multivariate_normal(mean=mu, cov=diag_mat**2)
    with self.test_session():
      dist = distributions.MultivariateNormalDiag(mu, diag)
      self.assertAllClose(scipy_mvn.entropy(), dist.entropy().eval(), atol=1e-4)

  def testNonmatchingMuDiagDimensionsFailsStatic(self):
    """A statically-known mu/diag shape mismatch raises at construction."""
    mu = [-1.0, 1.0]
    diag = [[1.0, 5.0]]
    with self.test_session():
      with self.assertRaisesRegexp(ValueError, "shape.*should match"):
        distributions.MultivariateNormalDiag(mu, diag)

  def testNonmatchingMuDiagDimensionsFailsDynamic(self):
    """With placeholder inputs the mismatch is only caught at run time."""
    mu_v = [-1.0, 1.0]
    diag_v = [[1.0, 5.0]]
    with self.test_session():
      mu_ph = tf.placeholder(tf.float32, name="mu_ph")
      diag_ph = tf.placeholder(tf.float32, name="diag_ph")
      dist = distributions.MultivariateNormalDiag(mu_ph, diag_ph)
      with self.assertRaisesOpError("mu should have rank"):
        dist.mean().eval(feed_dict={mu_ph: mu_v, diag_ph: diag_v})

  def testSample(self):
    """Sample moments approximate mu and the diag**2 covariance."""
    mu = [-1.0, 1.0]
    diag = [1.0, 2.0]
    with self.test_session():
      dist = distributions.MultivariateNormalDiag(mu, diag)
      samps = dist.sample_n(1000, seed=0).eval()
      cov_mat = tf.batch_matrix_diag(diag).eval() ** 2

      self.assertAllClose(mu, samps.mean(axis=0), atol=0.1)
      self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.1)
class MultivariateNormalDiagPlusVDVTTest(tf.test.TestCase):
  """Well tested because this is a simple override of the base class."""

  def setUp(self):
    # Fixed seed keeps the randomized tests deterministic.
    self._rng = np.random.RandomState(42)

  def testMean(self):
    """mean() returns mu unchanged."""
    mu = [-1.0, 1.0]
    diag_large = [1.0, 5.0]
    v = [[2.0], [3.0]]
    diag_small = [3.0]
    with self.test_session():
      dist = distributions.MultivariateNormalDiagPlusVDVT(
          mu, diag_large, v, diag_small=diag_small)
      self.assertAllEqual(mu, dist.mean().eval())

  def testNonmatchingMuAndSigmaDimensionFailsStatic(self):
    """A statically-known mu/covariance shape mismatch raises at build."""
    mu = self._rng.rand(2)
    # With this diag_large and v, the covariance is 3 x 3
    diag_large = self._rng.rand(3)
    v = self._rng.rand(3, 2)  # v works with diag_large.
    with self.test_session():
      with self.assertRaisesRegexp(ValueError, "shape.*should match"):
        distributions.MultivariateNormalDiagPlusVDVT(
            mu, diag_large, v)

  def testNonmatchingMuDiagDimensionsFailsDynamic(self):
    """With placeholder inputs the mismatch is only caught at run time."""
    mu = self._rng.rand(2)
    # With this diag_large and v, the covariance is 3 x 3
    diag_large = self._rng.rand(3)
    v = self._rng.rand(3, 2)  # v works with diag_large.
    with self.test_session():
      mu_ph = tf.placeholder(tf.float32, name="mu_ph")
      v_ph = tf.placeholder(tf.float32, name="v_ph")
      diag_ph = tf.placeholder(tf.float32, name="diag_ph")
      dist = distributions.MultivariateNormalDiagPlusVDVT(
          mu_ph, diag_ph, v_ph)
      with self.assertRaisesOpError("mu.*cov.*shape"):
        dist.mean().eval(feed_dict={mu_ph: mu, diag_ph: diag_large, v_ph: v})

  def testSample(self):
    """Sample moments approximate mu and the distribution's sigma."""
    mu = [-1.0, 1.0]
    diag_large = [1.0, 0.5]
    v = [[0.2], [0.3]]
    with self.test_session():
      dist = distributions.MultivariateNormalDiagPlusVDVT(mu, diag_large, v)

      samps = dist.sample_n(1000, seed=0).eval()
      cov_mat = dist.sigma.eval()

      self.assertAllClose(mu, samps.mean(axis=0), atol=0.1)
      self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.1)
class MultivariateNormalCholeskyTest(tf.test.TestCase):
  """Tests MultivariateNormalCholesky against scipy and sample statistics."""

  def setUp(self):
    # Fixed seed keeps the randomized tests deterministic.
    self._rng = np.random.RandomState(42)

  def _random_chol(self, *shape):
    """Return (chol, sigma) numpy arrays for a random valid Cholesky factor.

    softplus on the diagonal keeps the diagonal strictly positive, and
    band_part(-1, 0) zeroes everything above the diagonal; sigma is then
    chol @ chol^T.
    """
    mat = self._rng.rand(*shape)
    chol = distributions.batch_matrix_diag_transform(
        mat, transform=tf.nn.softplus)
    chol = tf.batch_matrix_band_part(chol, -1, 0)
    sigma = tf.batch_matmul(chol, chol, adj_y=True)
    return chol.eval(), sigma.eval()

  def testNonmatchingMuSigmaFailsStatic(self):
    """Statically-known mu/chol shape mismatches raise at construction."""
    with self.test_session():
      mu = self._rng.rand(2)
      chol, _ = self._random_chol(2, 2, 2)
      with self.assertRaisesRegexp(ValueError, "shape.*should match"):
        distributions.MultivariateNormalCholesky(mu, chol)

      mu = self._rng.rand(2, 1)
      chol, _ = self._random_chol(2, 2, 2)
      with self.assertRaisesRegexp(ValueError, "shape.*should match"):
        distributions.MultivariateNormalCholesky(mu, chol)

  def testNonmatchingMuSigmaFailsDynamic(self):
    """With placeholder inputs the mismatch is only caught at run time."""
    with self.test_session():
      mu_ph = tf.placeholder(tf.float64)
      chol_ph = tf.placeholder(tf.float64)

      mu_v = self._rng.rand(2)
      chol_v, _ = self._random_chol(2, 2, 2)
      mvn = distributions.MultivariateNormalCholesky(mu_ph, chol_ph)
      with self.assertRaisesOpError("mu should have rank 1 less than cov"):
        mvn.mean().eval(feed_dict={mu_ph: mu_v, chol_ph: chol_v})

      mu_v = self._rng.rand(2, 1)
      chol_v, _ = self._random_chol(2, 2, 2)
      mvn = distributions.MultivariateNormalCholesky(mu_ph, chol_ph)
      with self.assertRaisesOpError("mu.shape and cov.shape.*should match"):
        mvn.mean().eval(feed_dict={mu_ph: mu_v, chol_ph: chol_v})

  def testLogPDFScalarBatch(self):
    """Scalar-batch (log_)pdf matches scipy and has scalar shape."""
    with self.test_session():
      mu = self._rng.rand(2)
      chol, sigma = self._random_chol(2, 2)
      mvn = distributions.MultivariateNormalCholesky(mu, chol)
      x = self._rng.rand(2)

      log_pdf = mvn.log_pdf(x)
      pdf = mvn.pdf(x)

      scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)

      expected_log_pdf = scipy_mvn.logpdf(x)
      expected_pdf = scipy_mvn.pdf(x)
      self.assertEqual((), log_pdf.get_shape())
      self.assertEqual((), pdf.get_shape())
      self.assertAllClose(expected_log_pdf, log_pdf.eval())
      self.assertAllClose(expected_pdf, pdf.eval())

  def testLogPDFXIsHigherRank(self):
    """x with an extra leading dim yields one pdf value per row of x."""
    with self.test_session():
      mu = self._rng.rand(2)
      chol, sigma = self._random_chol(2, 2)
      mvn = distributions.MultivariateNormalCholesky(mu, chol)
      x = self._rng.rand(3, 2)

      log_pdf = mvn.log_pdf(x)
      pdf = mvn.pdf(x)

      scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)

      expected_log_pdf = scipy_mvn.logpdf(x)
      expected_pdf = scipy_mvn.pdf(x)
      self.assertEqual((3,), log_pdf.get_shape())
      self.assertEqual((3,), pdf.get_shape())
      self.assertAllClose(expected_log_pdf, log_pdf.eval())
      self.assertAllClose(expected_pdf, pdf.eval())

  def testLogPDFXLowerDimension(self):
    """A single x is broadcast across a batch of 3 distributions."""
    with self.test_session():
      mu = self._rng.rand(3, 2)
      chol, sigma = self._random_chol(3, 2, 2)
      mvn = distributions.MultivariateNormalCholesky(mu, chol)
      x = self._rng.rand(2)

      log_pdf = mvn.log_pdf(x)
      pdf = mvn.pdf(x)

      self.assertEqual((3,), log_pdf.get_shape())
      self.assertEqual((3,), pdf.get_shape())

      # scipy can't do batches, so just test one of them.
      scipy_mvn = stats.multivariate_normal(mean=mu[1, :], cov=sigma[1, :, :])
      expected_log_pdf = scipy_mvn.logpdf(x)
      expected_pdf = scipy_mvn.pdf(x)

      self.assertAllClose(expected_log_pdf, log_pdf.eval()[1])
      self.assertAllClose(expected_pdf, pdf.eval()[1])

  def testEntropy(self):
    """Scalar-batch entropy matches scipy."""
    with self.test_session():
      mu = self._rng.rand(2)
      chol, sigma = self._random_chol(2, 2)
      mvn = distributions.MultivariateNormalCholesky(mu, chol)
      entropy = mvn.entropy()

      scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)
      expected_entropy = scipy_mvn.entropy()
      self.assertEqual(entropy.get_shape(), ())
      self.assertAllClose(expected_entropy, entropy.eval())

  def testEntropyMultidimensional(self):
    """Batched entropy has batch shape (3, 5); one entry checked vs scipy."""
    with self.test_session():
      mu = self._rng.rand(3, 5, 2)
      chol, sigma = self._random_chol(3, 5, 2, 2)
      mvn = distributions.MultivariateNormalCholesky(mu, chol)
      entropy = mvn.entropy()

      # Scipy doesn't do batches, so test one of them.
      expected_entropy = stats.multivariate_normal(
          mean=mu[1, 1, :], cov=sigma[1, 1, :, :]).entropy()
      self.assertEqual(entropy.get_shape(), (3, 5))
      self.assertAllClose(expected_entropy, entropy.eval()[1, 1])

  def testSample(self):
    """Sample mean/covariance approximate mu and sigma."""
    with self.test_session():
      mu = self._rng.rand(2)
      chol, sigma = self._random_chol(2, 2)

      n = tf.constant(100000)
      mvn = distributions.MultivariateNormalCholesky(mu, chol)
      samples = mvn.sample_n(n, seed=137)
      sample_values = samples.eval()
      self.assertEqual(samples.get_shape(), (100000, 2))
      self.assertAllClose(sample_values.mean(axis=0), mu, atol=1e-2)
      self.assertAllClose(np.cov(sample_values, rowvar=0), sigma, atol=1e-1)

  def testSampleWithSampleShape(self):
    """sample() with a multi-dim sample_shape prepends it to batch/event."""
    with self.test_session():
      mu = self._rng.rand(3, 5, 2)
      chol, sigma = self._random_chol(3, 5, 2, 2)
      mvn = distributions.MultivariateNormalCholesky(mu, chol)
      samples_val = mvn.sample((10, 11, 12), seed=137).eval()

      # Check sample shape
      self.assertEqual((10, 11, 12, 3, 5, 2), samples_val.shape)

      # Check sample means
      x = samples_val[:, :, :, 1, 1, :]
      self.assertAllClose(
          x.reshape(10 * 11 * 12, 2).mean(axis=0),
          mu[1, 1], atol=1e-2)

      # Check that log_prob(samples) works
      log_prob_val = mvn.log_prob(samples_val).eval()
      x_log_pdf = log_prob_val[:, :, :, 1, 1]
      expected_log_pdf = stats.multivariate_normal(
          mean=mu[1, 1, :], cov=sigma[1, 1, :, :]).logpdf(x)
      self.assertAllClose(expected_log_pdf, x_log_pdf)

  def testSampleMultiDimensional(self):
    """sample_n on a batched distribution; one batch entry is checked."""
    with self.test_session():
      mu = self._rng.rand(3, 5, 2)
      chol, sigma = self._random_chol(3, 5, 2, 2)

      mvn = distributions.MultivariateNormalCholesky(mu, chol)
      n = tf.constant(100000)
      samples = mvn.sample_n(n, seed=137)
      sample_values = samples.eval()

      self.assertEqual(samples.get_shape(), (100000, 3, 5, 2))
      self.assertAllClose(
          sample_values[:, 1, 1, :].mean(axis=0),
          mu[1, 1, :], atol=0.05)
      self.assertAllClose(
          np.cov(sample_values[:, 1, 1, :], rowvar=0),
          sigma[1, 1, :, :], atol=1e-1)

  def testShapes(self):
    """Static and dynamic event/batch shapes agree: event (2,), batch (3, 5)."""
    with self.test_session():
      mu = self._rng.rand(3, 5, 2)
      chol, _ = self._random_chol(3, 5, 2, 2)

      mvn = distributions.MultivariateNormalCholesky(mu, chol)

      # Shapes known at graph construction time.
      self.assertEqual((2,), tuple(mvn.get_event_shape().as_list()))
      self.assertEqual((3, 5), tuple(mvn.get_batch_shape().as_list()))

      # Shapes known at runtime.
      self.assertEqual((2,), tuple(mvn.event_shape().eval()))
      self.assertEqual((3, 5), tuple(mvn.batch_shape().eval()))
class MultivariateNormalFullTest(tf.test.TestCase):
  """KL-divergence tests for MultivariateNormalFull."""

  def setUp(self):
    # Fixed seed keeps the randomized tests deterministic.
    self._rng = np.random.RandomState(42)

  def _random_mu_and_sigma(self, batch_shape, event_shape):
    """Return random (mu, sigma) numpy arrays with the given shapes."""
    # This ensures sigma is positive def.
    mat_shape = batch_shape + event_shape + event_shape
    mat = self._rng.randn(*mat_shape)
    sigma = tf.batch_matmul(mat, mat, adj_y=True).eval()

    mu_shape = batch_shape + event_shape
    mu = self._rng.randn(*mu_shape)

    return mu, sigma

  def testKLNonBatch(self):
    """Scalar-batch KL matches the numpy closed form."""
    batch_shape = ()
    event_shape = (2,)
    with self.test_session():
      mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
      mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)
      mvn_a = distributions.MultivariateNormalFull(mu_a, sigma_a)
      mvn_b = distributions.MultivariateNormalFull(mu_b, sigma_b)

      kl = distributions.kl(mvn_a, mvn_b)
      self.assertEqual(batch_shape, kl.get_shape())

      kl_v = kl.eval()
      expected_kl = _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b)
      self.assertAllClose(expected_kl, kl_v)

  def testKLBatch(self):
    """Batched KL: each batch entry matches its own non-batch computation."""
    batch_shape = (2,)
    event_shape = (3,)
    with self.test_session():
      mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
      mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)
      mvn_a = distributions.MultivariateNormalFull(mu_a, sigma_a)
      mvn_b = distributions.MultivariateNormalFull(mu_b, sigma_b)

      kl = distributions.kl(mvn_a, mvn_b)
      self.assertEqual(batch_shape, kl.get_shape())

      kl_v = kl.eval()
      expected_kl_0 = _compute_non_batch_kl(
          mu_a[0, :], sigma_a[0, :, :], mu_b[0, :], sigma_b[0, :])
      expected_kl_1 = _compute_non_batch_kl(
          mu_a[1, :], sigma_a[1, :, :], mu_b[1, :], sigma_b[1, :])
      self.assertAllClose(expected_kl_0, kl_v[0])
      self.assertAllClose(expected_kl_1, kl_v[1])

  def testKLTwoIdenticalDistributionsIsZero(self):
    """KL of a distribution with itself is (a batch of) zeros."""
    batch_shape = (2,)
    event_shape = (3,)
    with self.test_session():
      mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
      mvn_a = distributions.MultivariateNormalFull(mu_a, sigma_a)

      # Should be zero since KL(p || p) = 0.
      kl = distributions.kl(mvn_a, mvn_a)
      self.assertEqual(batch_shape, kl.get_shape())

      kl_v = kl.eval()
      self.assertAllClose(np.zeros(*batch_shape), kl_v)
def _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b):
"""Non-batch KL for N(mu_a, sigma_a), N(mu_b, sigma_b)."""
# Check using numpy operations
# This mostly repeats the tensorflow code _kl_mvn_mvn(), but in numpy.
# So it is important to also check that KL(mvn, mvn) = 0.
sigma_b_inv = np.linalg.inv(sigma_b)
t = np.trace(sigma_b_inv.dot(sigma_a))
q = (mu_b - mu_a).dot(sigma_b_inv).dot(mu_b - mu_a)
k = mu_a.shape[0]
l = np.log(np.linalg.det(sigma_b) / np.linalg.det(sigma_a))
return 0.5 * (t + q - k + l)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
rhattersley/iris | lib/iris/tests/unit/fileformats/grib/message/test__MessageLocation.py | 7 | 1806 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for the `iris.fileformats.grib.message._MessageLocation` class.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from iris.fileformats.grib.message import _MessageLocation
from iris.tests import mock
class Test(tests.IrisTest):
    """Calling a _MessageLocation lazily loads the message it points at."""

    def test(self):
        location = _MessageLocation(
            mock.sentinel.filename, mock.sentinel.location)
        target = ('iris.fileformats.grib.message._RawGribMessage.'
                  'from_file_offset')
        with mock.patch(target,
                        return_value=mock.sentinel.message) as from_offset:
            message = location()
        # The loader must be invoked exactly once, with the stored
        # filename/offset pair, and its result returned unchanged.
        from_offset.assert_called_once_with(mock.sentinel.filename,
                                            mock.sentinel.location)
        self.assertIs(message, mock.sentinel.message)
if __name__ == '__main__':
tests.main()
| lgpl-3.0 |
melon-li/openstack-dashboard | openstack_dashboard/dashboards/project/access_and_security/views.py | 66 | 1224 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for Instances and Volumes.
"""
from django.utils.translation import ugettext_lazy as _
from horizon import tabs
from openstack_dashboard.dashboards.project.access_and_security \
import tabs as project_tabs
class IndexView(tabs.TabbedTableView):
    """Tabbed index page for the project's Access & Security panel."""

    # Tab group that supplies the individual tabs rendered by this view.
    tab_group_class = project_tabs.AccessAndSecurityTabs
    template_name = 'project/access_and_security/index.html'
    page_title = _("Access & Security")
| apache-2.0 |
charlievieth/GoSubl | something_borrowed/diff_match_patch/python3/diff_match_patch_test.py | 284 | 41615 | #!/usr/bin/python3
"""Test harness for diff_match_patch.py
Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import imp
import sys
import time
import unittest
import diff_match_patch as dmp_module
# Force a module reload. Allows one to edit the DMP module and rerun the tests
# without leaving the Python interpreter.
imp.reload(dmp_module)
class DiffMatchPatchTest(unittest.TestCase):
  """Shared fixture: a fresh diff_match_patch instance for each test."""

  def setUp(self):
    "Test harness for dmp_module."
    self.dmp = dmp_module.diff_match_patch()

  def diff_rebuildtexts(self, diffs):
    """Reconstruct the (text1, text2) pair that produced `diffs`."""
    # text1 omits insertions; text2 omits deletions.
    pieces1 = []
    pieces2 = []
    for op, data in diffs:
      if op != dmp_module.diff_match_patch.DIFF_INSERT:
        pieces1.append(data)
      if op != dmp_module.diff_match_patch.DIFF_DELETE:
        pieces2.append(data)
    return ("".join(pieces1), "".join(pieces2))
class DiffTest(DiffMatchPatchTest):
"""DIFF TEST FUNCTIONS"""
  def testDiffCommonPrefix(self):
    """diff_commonPrefix returns the length of the shared prefix."""
    # Detect any common prefix.
    # Null case.
    self.assertEqual(0, self.dmp.diff_commonPrefix("abc", "xyz"))

    # Non-null case.
    self.assertEqual(4, self.dmp.diff_commonPrefix("1234abcdef", "1234xyz"))

    # Whole case.
    self.assertEqual(4, self.dmp.diff_commonPrefix("1234", "1234xyz"))
  def testDiffCommonSuffix(self):
    """diff_commonSuffix returns the length of the shared suffix."""
    # Detect any common suffix.
    # Null case.
    self.assertEqual(0, self.dmp.diff_commonSuffix("abc", "xyz"))

    # Non-null case.
    self.assertEqual(4, self.dmp.diff_commonSuffix("abcdef1234", "xyz1234"))

    # Whole case.
    self.assertEqual(4, self.dmp.diff_commonSuffix("1234", "xyz1234"))
  def testDiffCommonOverlap(self):
    """diff_commonOverlap: length of suffix of text1 matching prefix of text2."""
    # Null case.
    self.assertEqual(0, self.dmp.diff_commonOverlap("", "abcd"))

    # Whole case.
    self.assertEqual(3, self.dmp.diff_commonOverlap("abc", "abcd"))

    # No overlap.
    self.assertEqual(0, self.dmp.diff_commonOverlap("123456", "abcd"))

    # Overlap.
    self.assertEqual(3, self.dmp.diff_commonOverlap("123456xxx", "xxxabcd"))

    # Unicode.
    # Some overly clever languages (C#) may treat ligatures as equal to their
    # component letters.  E.g. U+FB01 == 'fi'
    self.assertEqual(0, self.dmp.diff_commonOverlap("fi", "\ufb01i"))
  def testDiffHalfMatch(self):
    """diff_halfMatch: no-match, single, multiple and non-optimal cases.

    Only active with a nonzero Diff_Timeout (it is a speedup heuristic).
    """
    # Detect a halfmatch.
    self.dmp.Diff_Timeout = 1
    # No match.
    self.assertEqual(None, self.dmp.diff_halfMatch("1234567890", "abcdef"))

    self.assertEqual(None, self.dmp.diff_halfMatch("12345", "23"))

    # Single Match.
    self.assertEqual(("12", "90", "a", "z", "345678"), self.dmp.diff_halfMatch("1234567890", "a345678z"))

    self.assertEqual(("a", "z", "12", "90", "345678"), self.dmp.diff_halfMatch("a345678z", "1234567890"))

    self.assertEqual(("abc", "z", "1234", "0", "56789"), self.dmp.diff_halfMatch("abc56789z", "1234567890"))

    self.assertEqual(("a", "xyz", "1", "7890", "23456"), self.dmp.diff_halfMatch("a23456xyz", "1234567890"))

    # Multiple Matches.
    self.assertEqual(("12123", "123121", "a", "z", "1234123451234"), self.dmp.diff_halfMatch("121231234123451234123121", "a1234123451234z"))

    self.assertEqual(("", "-=-=-=-=-=", "x", "", "x-=-=-=-=-=-=-="), self.dmp.diff_halfMatch("x-=-=-=-=-=-=-=-=-=-=-=-=", "xx-=-=-=-=-=-=-="))

    self.assertEqual(("-=-=-=-=-=", "", "", "y", "-=-=-=-=-=-=-=y"), self.dmp.diff_halfMatch("-=-=-=-=-=-=-=-=-=-=-=-=y", "-=-=-=-=-=-=-=yy"))

    # Non-optimal halfmatch.
    # Optimal diff would be -q+x=H-i+e=lloHe+Hu=llo-Hew+y not -qHillo+x=HelloHe-w+Hulloy
    self.assertEqual(("qHillo", "w", "x", "Hulloy", "HelloHe"), self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))

    # Optimal no halfmatch.
    self.dmp.Diff_Timeout = 0
    self.assertEqual(None, self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
  def testDiffLinesToChars(self):
    """diff_linesToChars encodes each unique line as a single character."""
    # Convert lines down to characters.
    self.assertEqual(("\x01\x02\x01", "\x02\x01\x02", ["", "alpha\n", "beta\n"]), self.dmp.diff_linesToChars("alpha\nbeta\nalpha\n", "beta\nalpha\nbeta\n"))

    self.assertEqual(("", "\x01\x02\x03\x03", ["", "alpha\r\n", "beta\r\n", "\r\n"]), self.dmp.diff_linesToChars("", "alpha\r\nbeta\r\n\r\n\r\n"))

    self.assertEqual(("\x01", "\x02", ["", "a", "b"]), self.dmp.diff_linesToChars("a", "b"))

    # More than 256 to reveal any 8-bit limitations.
    n = 300
    lineList = []
    charList = []
    for x in range(1, n + 1):
      lineList.append(str(x) + "\n")
      charList.append(chr(x))
    self.assertEqual(n, len(lineList))
    lines = "".join(lineList)
    chars = "".join(charList)
    self.assertEqual(n, len(chars))
    # Index 0 of the line array is reserved (padding), hence the leading "".
    lineList.insert(0, "")
    self.assertEqual((chars, "", lineList), self.dmp.diff_linesToChars(lines, ""))
  def testDiffCharsToLines(self):
    """diff_charsToLines is the inverse of diff_linesToChars on a diff list."""
    # Convert chars up to lines.
    diffs = [(self.dmp.DIFF_EQUAL, "\x01\x02\x01"), (self.dmp.DIFF_INSERT, "\x02\x01\x02")]
    self.dmp.diff_charsToLines(diffs, ["", "alpha\n", "beta\n"])
    self.assertEqual([(self.dmp.DIFF_EQUAL, "alpha\nbeta\nalpha\n"), (self.dmp.DIFF_INSERT, "beta\nalpha\nbeta\n")], diffs)

    # More than 256 to reveal any 8-bit limitations.
    n = 300
    lineList = []
    charList = []
    for x in range(1, n + 1):
      lineList.append(str(x) + "\n")
      charList.append(chr(x))
    self.assertEqual(n, len(lineList))
    lines = "".join(lineList)
    chars = "".join(charList)
    self.assertEqual(n, len(chars))
    # Index 0 of the line array is reserved (padding), hence the leading "".
    lineList.insert(0, "")
    diffs = [(self.dmp.DIFF_DELETE, chars)]
    self.dmp.diff_charsToLines(diffs, lineList)
    self.assertEqual([(self.dmp.DIFF_DELETE, lines)], diffs)
  def testDiffCleanupMerge(self):
    """diff_cleanupMerge merges adjacent same-op entries and slides edits.

    The diff list is modified in place.
    """
    # Cleanup a messy diff.
    # Null case.
    diffs = []
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEqual([], diffs)

    # No change case.
    diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEqual([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")], diffs)

    # Merge equalities.
    diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_EQUAL, "c")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEqual([(self.dmp.DIFF_EQUAL, "abc")], diffs)

    # Merge deletions.
    diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_DELETE, "c")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "abc")], diffs)

    # Merge insertions.
    diffs = [(self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_INSERT, "c")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEqual([(self.dmp.DIFF_INSERT, "abc")], diffs)

    # Merge interweave.
    diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "d"), (self.dmp.DIFF_EQUAL, "e"), (self.dmp.DIFF_EQUAL, "f")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_INSERT, "bd"), (self.dmp.DIFF_EQUAL, "ef")], diffs)

    # Prefix and suffix detection.
    diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEqual([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "c")], diffs)

    # Prefix and suffix detection with equalities.
    diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc"), (self.dmp.DIFF_EQUAL, "y")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEqual([(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "cy")], diffs)

    # Slide edit left.
    diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "ba"), (self.dmp.DIFF_EQUAL, "c")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEqual([(self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "ac")], diffs)

    # Slide edit right.
    diffs = [(self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "a")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEqual([(self.dmp.DIFF_EQUAL, "ca"), (self.dmp.DIFF_INSERT, "ba")], diffs)

    # Slide edit left recursive.
    diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_EQUAL, "x")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "acx")], diffs)

    # Slide edit right recursive.
    diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "ca"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "a")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEqual([(self.dmp.DIFF_EQUAL, "xca"), (self.dmp.DIFF_DELETE, "cba")], diffs)
  def testDiffCleanupSemanticLossless(self):
    """diff_cleanupSemanticLossless shifts edit boundaries to logical breaks.

    "Lossless" because the rebuilt texts are unchanged; only the boundaries
    between equal and edited runs move. The diff list is modified in place.
    """
    # Slide diffs to match logical boundaries.
    # Null case.
    diffs = []
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEqual([], diffs)

    # Blank lines.
    diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\n\r\nBBB"), (self.dmp.DIFF_INSERT, "\r\nDDD\r\n\r\nBBB"), (self.dmp.DIFF_EQUAL, "\r\nEEE")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEqual([(self.dmp.DIFF_EQUAL, "AAA\r\n\r\n"), (self.dmp.DIFF_INSERT, "BBB\r\nDDD\r\n\r\n"), (self.dmp.DIFF_EQUAL, "BBB\r\nEEE")], diffs)

    # Line boundaries.
    diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\nBBB"), (self.dmp.DIFF_INSERT, " DDD\r\nBBB"), (self.dmp.DIFF_EQUAL, " EEE")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEqual([(self.dmp.DIFF_EQUAL, "AAA\r\n"), (self.dmp.DIFF_INSERT, "BBB DDD\r\n"), (self.dmp.DIFF_EQUAL, "BBB EEE")], diffs)

    # Word boundaries.
    diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_INSERT, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEqual([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_INSERT, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)

    # Alphanumeric boundaries.
    diffs = [(self.dmp.DIFF_EQUAL, "The-c"), (self.dmp.DIFF_INSERT, "ow-and-the-c"), (self.dmp.DIFF_EQUAL, "at.")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEqual([(self.dmp.DIFF_EQUAL, "The-"), (self.dmp.DIFF_INSERT, "cow-and-the-"), (self.dmp.DIFF_EQUAL, "cat.")], diffs)

    # Hitting the start.
    diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "ax")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "aax")], diffs)

    # Hitting the end.
    diffs = [(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "a")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEqual([(self.dmp.DIFF_EQUAL, "xaa"), (self.dmp.DIFF_DELETE, "a")], diffs)

    # Sentence boundaries.
    diffs = [(self.dmp.DIFF_EQUAL, "The xxx. The "), (self.dmp.DIFF_INSERT, "zzz. The "), (self.dmp.DIFF_EQUAL, "yyy.")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEqual([(self.dmp.DIFF_EQUAL, "The xxx."), (self.dmp.DIFF_INSERT, " The zzz."), (self.dmp.DIFF_EQUAL, " The yyy.")], diffs)
  def testDiffCleanupSemantic(self):
    """diff_cleanupSemantic folds small equalities into surrounding edits
    and factors out overlaps between deletions and insertions.

    The diff list is modified in place.
    """
    # Cleanup semantically trivial equalities.
    # Null case.
    diffs = []
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEqual([], diffs)

    # No elimination #1.
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")], diffs)

    # No elimination #2.
    diffs = [(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")], diffs)

    # Simple elimination.
    diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "c")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "b")], diffs)

    # Backpass elimination.
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_EQUAL, "cd"), (self.dmp.DIFF_DELETE, "e"), (self.dmp.DIFF_EQUAL, "f"), (self.dmp.DIFF_INSERT, "g")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "abcdef"), (self.dmp.DIFF_INSERT, "cdfg")], diffs)

    # Multiple eliminations.
    diffs = [(self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2"), (self.dmp.DIFF_EQUAL, "_"), (self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "AB_AB"), (self.dmp.DIFF_INSERT, "1A2_1A2")], diffs)

    # Word boundaries.
    diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_DELETE, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEqual([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_DELETE, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)

    # No overlap elimination.
    diffs = [(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")], diffs)

    # Overlap elimination.
    diffs = [(self.dmp.DIFF_DELETE, "abcxxx"), (self.dmp.DIFF_INSERT, "xxxdef")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_INSERT, "def")], diffs)

    # Reverse overlap elimination.
    diffs = [(self.dmp.DIFF_DELETE, "xxxabc"), (self.dmp.DIFF_INSERT, "defxxx")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEqual([(self.dmp.DIFF_INSERT, "def"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_DELETE, "abc")], diffs)

    # Two overlap eliminations.
    diffs = [(self.dmp.DIFF_DELETE, "abcd1212"), (self.dmp.DIFF_INSERT, "1212efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A3"), (self.dmp.DIFF_INSERT, "3BC")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "abcd"), (self.dmp.DIFF_EQUAL, "1212"), (self.dmp.DIFF_INSERT, "efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A"), (self.dmp.DIFF_EQUAL, "3"), (self.dmp.DIFF_INSERT, "BC")], diffs)
def testDiffCleanupEfficiency(self):
    """diff_cleanupEfficiency: merge short equalities whose retention
    costs more than Diff_EditCost, folding them into the surrounding edits."""
    # Cleanup operationally trivial equalities.
    self.dmp.Diff_EditCost = 4
    # Null case.
    diffs = []
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEqual([], diffs)
    # No elimination.
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")], diffs)
    # Four-edit elimination.
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xyz34")], diffs)
    # Three-edit elimination.
    diffs = [(self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "xcd"), (self.dmp.DIFF_INSERT, "12x34")], diffs)
    # Backpass elimination.
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xy"), (self.dmp.DIFF_INSERT, "34"), (self.dmp.DIFF_EQUAL, "z"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "56")]
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xy34z56")], diffs)
    # High cost elimination.
    self.dmp.Diff_EditCost = 5
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEqual([(self.dmp.DIFF_DELETE, "abwxyzcd"), (self.dmp.DIFF_INSERT, "12wxyz34")], diffs)
    # Restore the default edit cost so later tests are unaffected.
    self.dmp.Diff_EditCost = 4
def testDiffPrettyHtml(self):
    """diff_prettyHtml: render a diff as span/del/ins HTML markup."""
    # Pretty print.
    diffs = [(self.dmp.DIFF_EQUAL, "a\n"), (self.dmp.DIFF_DELETE, "<B>b</B>"), (self.dmp.DIFF_INSERT, "c&d")]
    # NOTE(review): this expected string appears HTML-entity-decoded compared
    # to upstream (which expects &para;, &lt;B&gt;, c&amp;d) -- confirm against
    # the library's actual output before relying on this assertion.
    self.assertEqual("<span>a¶<br></span><del style=\"background:#ffe6e6;\"><B>b</B></del><ins style=\"background:#e6ffe6;\">c&d</ins>", self.dmp.diff_prettyHtml(diffs))
def testDiffText(self):
    """diff_text1/diff_text2: recover source and destination texts from a diff."""
    # Compute the source and destination texts.
    diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy")]
    self.assertEqual("jumps over the lazy", self.dmp.diff_text1(diffs))
    self.assertEqual("jumped over a lazy", self.dmp.diff_text2(diffs))
def testDiffDelta(self):
    """Round-trip diffs through the compact delta encoding
    (diff_toDelta / diff_fromDelta), including error and escaping cases."""
    # Convert a diff into delta string.
    diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy"), (self.dmp.DIFF_INSERT, "old dog")]
    text1 = self.dmp.diff_text1(diffs)
    self.assertEqual("jumps over the lazy", text1)
    delta = self.dmp.diff_toDelta(diffs)
    self.assertEqual("=4\t-1\t+ed\t=6\t-3\t+a\t=5\t+old dog", delta)
    # Convert delta string into a diff.
    self.assertEqual(diffs, self.dmp.diff_fromDelta(text1, delta))
    # Generates error (19 != 20): base text one char too long for the delta.
    try:
        self.dmp.diff_fromDelta(text1 + "x", delta)
        self.assertFalse(True)
    except ValueError:
        # Exception expected.
        pass
    # Generates error (19 != 18): base text one char too short for the delta.
    try:
        self.dmp.diff_fromDelta(text1[1:], delta)
        self.assertFalse(True)
    except ValueError:
        # Exception expected.
        pass
    # Generates error (%c3%xy invalid Unicode).
    # Note: Python 3 can decode this.
    #try:
    #  self.dmp.diff_fromDelta("", "+%c3xy")
    #  self.assertFalse(True)
    #except ValueError:
    #  # Exception expected.
    #  pass
    # Test deltas with special characters (non-ASCII, control chars, escapes).
    diffs = [(self.dmp.DIFF_EQUAL, "\u0680 \x00 \t %"), (self.dmp.DIFF_DELETE, "\u0681 \x01 \n ^"), (self.dmp.DIFF_INSERT, "\u0682 \x02 \\ |")]
    text1 = self.dmp.diff_text1(diffs)
    self.assertEqual("\u0680 \x00 \t %\u0681 \x01 \n ^", text1)
    delta = self.dmp.diff_toDelta(diffs)
    self.assertEqual("=7\t-7\t+%DA%82 %02 %5C %7C", delta)
    # Convert delta string into a diff.
    self.assertEqual(diffs, self.dmp.diff_fromDelta(text1, delta))
    # Verify pool of unchanged characters (URL-safe chars stay unescaped).
    diffs = [(self.dmp.DIFF_INSERT, "A-Z a-z 0-9 - _ . ! ~ * ' ( ) ; / ? : @ & = + $ , # ")]
    text2 = self.dmp.diff_text2(diffs)
    self.assertEqual("A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", text2)
    delta = self.dmp.diff_toDelta(diffs)
    self.assertEqual("+A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", delta)
    # Convert delta string into a diff.
    self.assertEqual(diffs, self.dmp.diff_fromDelta("", delta))
def testDiffXIndex(self):
    """diff_xIndex: map a character offset in text1 to its offset in text2."""
    # Translate a location in text1 to text2.
    self.assertEqual(5, self.dmp.diff_xIndex([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 2))
    # Translation on deletion.
    self.assertEqual(1, self.dmp.diff_xIndex([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 3))
def testDiffLevenshtein(self):
    """diff_levenshtein: edit distance of a diff regardless of where the
    equality sits (leading, trailing, or in the middle)."""
    # Levenshtein with trailing equality.
    self.assertEqual(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")]))
    # Levenshtein with leading equality.
    self.assertEqual(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234")]))
    # Levenshtein with middle equality.
    self.assertEqual(7, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_INSERT, "1234")]))
def testDiffBisect(self):
    """diff_bisect: core divide-and-conquer diff, with and without a deadline."""
    # Normal.
    a = "cat"
    b = "map"
    # Since the resulting diff hasn't been normalized, it would be ok if
    # the insertion and deletion pairs are swapped.
    # If the order changes, tweak this test as required.
    self.assertEqual([(self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "m"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "t"), (self.dmp.DIFF_INSERT, "p")], self.dmp.diff_bisect(a, b, sys.maxsize))
    # Timeout: a deadline in the past degrades to a full delete + insert.
    self.assertEqual([(self.dmp.DIFF_DELETE, "cat"), (self.dmp.DIFF_INSERT, "map")], self.dmp.diff_bisect(a, b, 0))
def testDiffMain(self):
    """diff_main: end-to-end diffs -- trivial cases, real diffs, timeout
    behaviour, line-mode speedup, and null-input validation."""
    # Perform a trivial diff.
    # Null case.
    self.assertEqual([], self.dmp.diff_main("", "", False))
    # Equality.
    self.assertEqual([(self.dmp.DIFF_EQUAL, "abc")], self.dmp.diff_main("abc", "abc", False))
    # Simple insertion.
    self.assertEqual([(self.dmp.DIFF_EQUAL, "ab"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "ab123c", False))
    # Simple deletion.
    self.assertEqual([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "bc")], self.dmp.diff_main("a123bc", "abc", False))
    # Two insertions.
    self.assertEqual([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_INSERT, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "a123b456c", False))
    # Two deletions.
    self.assertEqual([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("a123b456c", "abc", False))
    # Perform a real diff.
    # Switch off the timeout.
    self.dmp.Diff_Timeout = 0
    # Simple cases.
    self.assertEqual([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b")], self.dmp.diff_main("a", "b", False))
    self.assertEqual([(self.dmp.DIFF_DELETE, "Apple"), (self.dmp.DIFF_INSERT, "Banana"), (self.dmp.DIFF_EQUAL, "s are a"), (self.dmp.DIFF_INSERT, "lso"), (self.dmp.DIFF_EQUAL, " fruit.")], self.dmp.diff_main("Apples are a fruit.", "Bananas are also fruit.", False))
    self.assertEqual([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "\u0680"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "\t"), (self.dmp.DIFF_INSERT, "\x00")], self.dmp.diff_main("ax\t", "\u0680x\x00", False))
    # Overlaps.
    self.assertEqual([(self.dmp.DIFF_DELETE, "1"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "y"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "2"), (self.dmp.DIFF_INSERT, "xab")], self.dmp.diff_main("1ayb2", "abxab", False))
    self.assertEqual([(self.dmp.DIFF_INSERT, "xaxcx"), (self.dmp.DIFF_EQUAL, "abc"), (self.dmp.DIFF_DELETE, "y")], self.dmp.diff_main("abcy", "xaxcxabc", False))
    self.assertEqual([(self.dmp.DIFF_DELETE, "ABCD"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "bcd"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "efghijklmnopqrs"), (self.dmp.DIFF_DELETE, "EFGHIJKLMNOefg")], self.dmp.diff_main("ABCDa=bcd=efghijklmnopqrsEFGHIJKLMNOefg", "a-bcd-efghijklmnopqrs", False))
    # Large equality.
    self.assertEqual([(self.dmp.DIFF_INSERT, " "), (self.dmp.DIFF_EQUAL,"a"), (self.dmp.DIFF_INSERT,"nd"), (self.dmp.DIFF_EQUAL," [[Pennsylvania]]"), (self.dmp.DIFF_DELETE," and [[New")], self.dmp.diff_main("a [[Pennsylvania]] and [[New", " and [[Pennsylvania]]", False))
    # Timeout.
    self.dmp.Diff_Timeout = 0.1  # 100ms
    a = "`Twas brillig, and the slithy toves\nDid gyre and gimble in the wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.\n"
    b = "I am the very model of a modern major general,\nI've information vegetable, animal, and mineral,\nI know the kings of England, and I quote the fights historical,\nFrom Marathon to Waterloo, in order categorical.\n"
    # Increase the text lengths by 1024 times to ensure a timeout.
    for x in range(10):
        a = a + a
        b = b + b
    startTime = time.time()
    self.dmp.diff_main(a, b)
    endTime = time.time()
    # Test that we took at least the timeout period.
    self.assertTrue(self.dmp.Diff_Timeout <= endTime - startTime)
    # Test that we didn't take forever (be forgiving).
    # Theoretically this test could fail very occasionally if the
    # OS task swaps or locks up for a second at the wrong moment.
    self.assertTrue(self.dmp.Diff_Timeout * 2 > endTime - startTime)
    self.dmp.Diff_Timeout = 0
    # Test the linemode speedup.
    # Must be long to pass the 100 char cutoff.
    # Simple line-mode.
    a = "1234567890\n" * 13
    b = "abcdefghij\n" * 13
    self.assertEqual(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
    # Single line-mode.
    a = "1234567890" * 13
    b = "abcdefghij" * 13
    self.assertEqual(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
    # Overlap line-mode: char-mode and line-mode must rebuild the same texts.
    a = "1234567890\n" * 13
    b = "abcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n"
    texts_linemode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, True))
    texts_textmode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, False))
    self.assertEqual(texts_textmode, texts_linemode)
    # Test null inputs.
    try:
        self.dmp.diff_main(None, None)
        self.assertFalse(True)
    except ValueError:
        # Exception expected.
        pass
class MatchTest(DiffMatchPatchTest):
    """MATCH TEST FUNCTIONS"""

    def testMatchAlphabet(self):
        """match_alphabet: build the Bitap per-character bitmask table."""
        # Initialise the bitmasks for Bitap.
        self.assertEqual({"a":4, "b":2, "c":1}, self.dmp.match_alphabet("abc"))
        self.assertEqual({"a":37, "b":18, "c":8}, self.dmp.match_alphabet("abcaba"))

    def testMatchBitap(self):
        """match_bitap: fuzzy locate a pattern near an expected location,
        honouring Match_Threshold and Match_Distance."""
        self.dmp.Match_Distance = 100
        self.dmp.Match_Threshold = 0.5
        # Exact matches.
        self.assertEqual(5, self.dmp.match_bitap("abcdefghijk", "fgh", 5))
        self.assertEqual(5, self.dmp.match_bitap("abcdefghijk", "fgh", 0))
        # Fuzzy matches.
        self.assertEqual(4, self.dmp.match_bitap("abcdefghijk", "efxhi", 0))
        self.assertEqual(2, self.dmp.match_bitap("abcdefghijk", "cdefxyhijk", 5))
        self.assertEqual(-1, self.dmp.match_bitap("abcdefghijk", "bxy", 1))
        # Overflow.
        self.assertEqual(2, self.dmp.match_bitap("123456789xx0", "3456789x0", 2))
        self.assertEqual(0, self.dmp.match_bitap("abcdef", "xxabc", 4))
        self.assertEqual(3, self.dmp.match_bitap("abcdef", "defyy", 4))
        self.assertEqual(0, self.dmp.match_bitap("abcdef", "xabcdefy", 0))
        # Threshold test: tighter thresholds reject fuzzier matches.
        self.dmp.Match_Threshold = 0.4
        self.assertEqual(4, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
        self.dmp.Match_Threshold = 0.3
        self.assertEqual(-1, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
        self.dmp.Match_Threshold = 0.0
        self.assertEqual(1, self.dmp.match_bitap("abcdefghijk", "bcdef", 1))
        self.dmp.Match_Threshold = 0.5
        # Multiple select: the candidate nearest the expected location wins.
        self.assertEqual(0, self.dmp.match_bitap("abcdexyzabcde", "abccde", 3))
        self.assertEqual(8, self.dmp.match_bitap("abcdexyzabcde", "abccde", 5))
        # Distance test.
        self.dmp.Match_Distance = 10  # Strict location.
        self.assertEqual(-1, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
        self.assertEqual(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdxxefg", 1))
        self.dmp.Match_Distance = 1000  # Loose location.
        self.assertEqual(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))

    def testMatchMain(self):
        """match_main: public entry point including shortcut paths and
        null-input validation."""
        # Full match.
        # Shortcut matches.
        self.assertEqual(0, self.dmp.match_main("abcdef", "abcdef", 1000))
        self.assertEqual(-1, self.dmp.match_main("", "abcdef", 1))
        self.assertEqual(3, self.dmp.match_main("abcdef", "", 3))
        self.assertEqual(3, self.dmp.match_main("abcdef", "de", 3))
        self.assertEqual(3, self.dmp.match_main("abcdef", "defy", 4))
        self.assertEqual(0, self.dmp.match_main("abcdef", "abcdefy", 0))
        # Complex match.
        self.dmp.Match_Threshold = 0.7
        self.assertEqual(4, self.dmp.match_main("I am the very model of a modern major general.", " that berry ", 5))
        self.dmp.Match_Threshold = 0.5
        # Test null inputs.
        try:
            self.dmp.match_main(None, None, 0)
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass
class PatchTest(DiffMatchPatchTest):
    """PATCH TEST FUNCTIONS"""

    def testPatchObj(self):
        """patch_obj: string serialization of a single patch hunk."""
        # Patch Object.
        p = dmp_module.patch_obj()
        p.start1 = 20
        p.start2 = 21
        p.length1 = 18
        p.length2 = 17
        p.diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, "\nlaz")]
        strp = str(p)
        self.assertEqual("@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n", strp)

    def testPatchFromText(self):
        """patch_fromText: parse unified-diff-style patch text."""
        self.assertEqual([], self.dmp.patch_fromText(""))
        strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n"
        self.assertEqual(strp, str(self.dmp.patch_fromText(strp)[0]))
        self.assertEqual("@@ -1 +1 @@\n-a\n+b\n", str(self.dmp.patch_fromText("@@ -1 +1 @@\n-a\n+b\n")[0]))
        self.assertEqual("@@ -1,3 +0,0 @@\n-abc\n", str(self.dmp.patch_fromText("@@ -1,3 +0,0 @@\n-abc\n")[0]))
        self.assertEqual("@@ -0,0 +1,3 @@\n+abc\n", str(self.dmp.patch_fromText("@@ -0,0 +1,3 @@\n+abc\n")[0]))
        # Generates error.
        try:
            self.dmp.patch_fromText("Bad\nPatch\n")
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass

    def testPatchToText(self):
        """patch_toText: serialize a list of patches back to text."""
        strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
        p = self.dmp.patch_fromText(strp)
        self.assertEqual(strp, self.dmp.patch_toText(p))
        strp = "@@ -1,9 +1,9 @@\n-f\n+F\n oo+fooba\n@@ -7,9 +7,9 @@\n obar\n-,\n+.\n tes\n"
        p = self.dmp.patch_fromText(strp)
        self.assertEqual(strp, self.dmp.patch_toText(p))

    def testPatchAddContext(self):
        """patch_addContext: grow a patch with surrounding context, expanding
        further when the context is ambiguous in the source text."""
        self.dmp.Patch_Margin = 4
        p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
        self.dmp.patch_addContext(p, "The quick brown fox jumps over the lazy dog.")
        self.assertEqual("@@ -17,12 +17,18 @@\n fox \n-jump\n+somersault\n s ov\n", str(p))
        # Same, but not enough trailing context.
        p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
        self.dmp.patch_addContext(p, "The quick brown fox jumps.")
        self.assertEqual("@@ -17,10 +17,16 @@\n fox \n-jump\n+somersault\n s.\n", str(p))
        # Same, but not enough leading context.
        p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
        self.dmp.patch_addContext(p, "The quick brown fox jumps.")
        self.assertEqual("@@ -1,7 +1,8 @@\n Th\n-e\n+at\n qui\n", str(p))
        # Same, but with ambiguity.
        p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
        self.dmp.patch_addContext(p, "The quick brown fox jumps.  The quick brown fox crashes.")
        self.assertEqual("@@ -1,27 +1,28 @@\n Th\n-e\n+at\n quick brown fox jumps. \n", str(p))

    def testPatchMake(self):
        """patch_make: build patches from texts and/or diffs in every
        supported argument combination."""
        # Null case.
        patches = self.dmp.patch_make("", "")
        self.assertEqual("", self.dmp.patch_toText(patches))
        text1 = "The quick brown fox jumps over the lazy dog."
        text2 = "That quick brown fox jumped over a lazy dog."
        # Text2+Text1 inputs.
        expectedPatch = "@@ -1,8 +1,7 @@\n Th\n-at\n+e\n qui\n@@ -21,17 +21,18 @@\n jump\n-ed\n+s\n over \n-a\n+the\n laz\n"
        # The second patch must be "-21,17 +21,18", not "-22,17 +21,18" due to rolling context.
        patches = self.dmp.patch_make(text2, text1)
        self.assertEqual(expectedPatch, self.dmp.patch_toText(patches))
        # Text1+Text2 inputs.
        expectedPatch = "@@ -1,11 +1,12 @@\n Th\n-e\n+at\n quick b\n@@ -22,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
        patches = self.dmp.patch_make(text1, text2)
        self.assertEqual(expectedPatch, self.dmp.patch_toText(patches))
        # Diff input.
        diffs = self.dmp.diff_main(text1, text2, False)
        patches = self.dmp.patch_make(diffs)
        self.assertEqual(expectedPatch, self.dmp.patch_toText(patches))
        # Text1+Diff inputs.
        patches = self.dmp.patch_make(text1, diffs)
        self.assertEqual(expectedPatch, self.dmp.patch_toText(patches))
        # Text1+Text2+Diff inputs (deprecated).
        patches = self.dmp.patch_make(text1, text2, diffs)
        self.assertEqual(expectedPatch, self.dmp.patch_toText(patches))
        # Character encoding.
        patches = self.dmp.patch_make("`1234567890-=[]\\;',./", "~!@#$%^&*()_+{}|:\"<>?")
        self.assertEqual("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n", self.dmp.patch_toText(patches))
        # Character decoding.
        diffs = [(self.dmp.DIFF_DELETE, "`1234567890-=[]\\;',./"), (self.dmp.DIFF_INSERT, "~!@#$%^&*()_+{}|:\"<>?")]
        self.assertEqual(diffs, self.dmp.patch_fromText("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n")[0].diffs)
        # Long string with repeats.
        text1 = ""
        for x in range(100):
            text1 += "abcdef"
        text2 = text1 + "123"
        expectedPatch = "@@ -573,28 +573,31 @@\n cdefabcdefabcdefabcdefabcdef\n+123\n"
        patches = self.dmp.patch_make(text1, text2)
        self.assertEqual(expectedPatch, self.dmp.patch_toText(patches))
        # Test null inputs.
        try:
            self.dmp.patch_make(None, None)
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass

    def testPatchSplitMax(self):
        """patch_splitMax: split patches longer than the Bitap bit width."""
        # Assumes that Match_MaxBits is 32.
        patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz01234567890", "XabXcdXefXghXijXklXmnXopXqrXstXuvXwxXyzX01X23X45X67X89X0")
        self.dmp.patch_splitMax(patches)
        self.assertEqual("@@ -1,32 +1,46 @@\n+X\n ab\n+X\n cd\n+X\n ef\n+X\n gh\n+X\n ij\n+X\n kl\n+X\n mn\n+X\n op\n+X\n qr\n+X\n st\n+X\n uv\n+X\n wx\n+X\n yz\n+X\n 012345\n@@ -25,13 +39,18 @@\n zX01\n+X\n 23\n+X\n 45\n+X\n 67\n+X\n 89\n+X\n 0\n", self.dmp.patch_toText(patches))
        # Patches that fit within the limit are left untouched.
        patches = self.dmp.patch_make("abcdef1234567890123456789012345678901234567890123456789012345678901234567890uvwxyz", "abcdefuvwxyz")
        oldToText = self.dmp.patch_toText(patches)
        self.dmp.patch_splitMax(patches)
        self.assertEqual(oldToText, self.dmp.patch_toText(patches))
        patches = self.dmp.patch_make("1234567890123456789012345678901234567890123456789012345678901234567890", "abc")
        self.dmp.patch_splitMax(patches)
        self.assertEqual("@@ -1,32 +1,4 @@\n-1234567890123456789012345678\n 9012\n@@ -29,32 +1,4 @@\n-9012345678901234567890123456\n 7890\n@@ -57,14 +1,3 @@\n-78901234567890\n+abc\n", self.dmp.patch_toText(patches))
        patches = self.dmp.patch_make("abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1", "abcdefghij , h : 1 , t : 1 abcdefghij , h : 1 , t : 1 abcdefghij , h : 0 , t : 1")
        self.dmp.patch_splitMax(patches)
        self.assertEqual("@@ -2,32 +2,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n@@ -29,32 +29,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n", self.dmp.patch_toText(patches))

    def testPatchAddPadding(self):
        """patch_addPadding: pad patch edges so they anchor reliably."""
        # Both edges full.
        patches = self.dmp.patch_make("", "test")
        self.assertEqual("@@ -0,0 +1,4 @@\n+test\n", self.dmp.patch_toText(patches))
        self.dmp.patch_addPadding(patches)
        self.assertEqual("@@ -1,8 +1,12 @@\n %01%02%03%04\n+test\n %01%02%03%04\n", self.dmp.patch_toText(patches))
        # Both edges partial.
        patches = self.dmp.patch_make("XY", "XtestY")
        self.assertEqual("@@ -1,2 +1,6 @@\n X\n+test\n Y\n", self.dmp.patch_toText(patches))
        self.dmp.patch_addPadding(patches)
        self.assertEqual("@@ -2,8 +2,12 @@\n %02%03%04X\n+test\n Y%01%02%03\n", self.dmp.patch_toText(patches))
        # Both edges none.
        patches = self.dmp.patch_make("XXXXYYYY", "XXXXtestYYYY")
        self.assertEqual("@@ -1,8 +1,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
        self.dmp.patch_addPadding(patches)
        self.assertEqual("@@ -5,8 +5,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))

    def testPatchApply(self):
        """patch_apply: apply patches to (possibly diverged) text, returning
        (new_text, per-patch success flags)."""
        self.dmp.Match_Distance = 1000
        self.dmp.Match_Threshold = 0.5
        self.dmp.Patch_DeleteThreshold = 0.5
        # Null case.
        patches = self.dmp.patch_make("", "")
        results = self.dmp.patch_apply(patches, "Hello world.")
        self.assertEqual(("Hello world.", []), results)
        # Exact match.
        patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "That quick brown fox jumped over a lazy dog.")
        results = self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
        self.assertEqual(("That quick brown fox jumped over a lazy dog.", [True, True]), results)
        # Partial match.
        results = self.dmp.patch_apply(patches, "The quick red rabbit jumps over the tired tiger.")
        self.assertEqual(("That quick red rabbit jumped over a tired tiger.", [True, True]), results)
        # Failed match.
        results = self.dmp.patch_apply(patches, "I am the very model of a modern major general.")
        self.assertEqual(("I am the very model of a modern major general.", [False, False]), results)
        # Big delete, small change.
        patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
        results = self.dmp.patch_apply(patches, "x123456789012345678901234567890-----++++++++++-----123456789012345678901234567890y")
        self.assertEqual(("xabcy", [True, True]), results)
        # Big delete, big change 1.
        patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
        results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
        self.assertEqual(("xabc12345678901234567890---------------++++++++++---------------12345678901234567890y", [False, True]), results)
        # Big delete, big change 2.
        self.dmp.Patch_DeleteThreshold = 0.6
        patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
        results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
        self.assertEqual(("xabcy", [True, True]), results)
        self.dmp.Patch_DeleteThreshold = 0.5
        # Compensate for failed patch.
        self.dmp.Match_Threshold = 0.0
        self.dmp.Match_Distance = 0
        patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz--------------------1234567890", "abcXXXXXXXXXXdefghijklmnopqrstuvwxyz--------------------1234567YYYYYYYYYY890")
        results = self.dmp.patch_apply(patches, "ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567890")
        self.assertEqual(("ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567YYYYYYYYYY890", [False, True]), results)
        self.dmp.Match_Threshold = 0.5
        self.dmp.Match_Distance = 1000
        # No side effects.
        patches = self.dmp.patch_make("", "test")
        patchstr = self.dmp.patch_toText(patches)
        results = self.dmp.patch_apply(patches, "")
        self.assertEqual(patchstr, self.dmp.patch_toText(patches))
        # No side effects with major delete.
        patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "Woof")
        patchstr = self.dmp.patch_toText(patches)
        self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
        self.assertEqual(patchstr, self.dmp.patch_toText(patches))
        # Edge exact match.
        patches = self.dmp.patch_make("", "test")
        # BUGFIX: the return value was previously discarded, so the assert
        # below checked a stale `results` from an earlier call.
        results = self.dmp.patch_apply(patches, "")
        self.assertEqual(("test", [True]), results)
        # Near edge exact match.
        patches = self.dmp.patch_make("XY", "XtestY")
        results = self.dmp.patch_apply(patches, "XY")
        self.assertEqual(("XtestY", [True]), results)
        # Edge partial match.
        patches = self.dmp.patch_make("y", "y123")
        results = self.dmp.patch_apply(patches, "x")
        self.assertEqual(("x123", [True]), results)
# Run the full test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| mit |
ansible/ansible | test/support/integration/plugins/module_utils/database.py | 54 | 5942 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class SQLParseError(Exception):
    """Raised when an SQL identifier cannot be parsed or violates a rule."""
    pass
class UnclosedQuoteError(SQLParseError):
    """Raised when a quoted identifier fragment has no closing quote."""
    pass
# maps a type of identifier to the maximum number of dot levels that are
# allowed to specify that identifier. For example, a database column can be
# specified by up to 4 levels: database.schema.table.column
_PG_IDENTIFIER_TO_DOT_LEVEL = dict(
    database=1,
    schema=2,
    table=3,
    column=4,
    role=1,
    tablespace=1,
    sequence=3,
    publication=1,
)
# MySQL equivalent: at most database.table.column (plus role and vars).
_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
def _find_end_quote(identifier, quote_char):
accumulate = 0
while True:
try:
quote = identifier.index(quote_char)
except ValueError:
raise UnclosedQuoteError
accumulate = accumulate + quote
try:
next_char = identifier[quote + 1]
except IndexError:
return accumulate
if next_char == quote_char:
try:
identifier = identifier[quote + 2:]
accumulate = accumulate + 2
except IndexError:
raise UnclosedQuoteError
else:
return accumulate
def _identifier_parse(identifier, quote_char):
    """Split a dotted, possibly quoted identifier into quoted fragments.

    Returns a list of fragments, each wrapped in ``quote_char``.
    User-quoted fragments are passed through verbatim; unquoted fragments
    are quoted here with embedded quote characters doubled.
    Raises SQLParseError on empty input or malformed quoting.
    """
    if not identifier:
        raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
    already_quoted = False
    if identifier.startswith(quote_char):
        already_quoted = True
        try:
            # Find the quote closing this leading user-quoted fragment
            # (search starts after the opening quote, hence the +1).
            end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
        except UnclosedQuoteError:
            # Unbalanced quote: fall through and treat the whole string as
            # an unquoted identifier that merely contains quote characters.
            already_quoted = False
        else:
            if end_quote < len(identifier) - 1:
                if identifier[end_quote + 1] == '.':
                    # Quoted fragment followed by a dot: recurse on the rest.
                    dot = end_quote + 1
                    first_identifier = identifier[:dot]
                    next_identifier = identifier[dot + 1:]
                    further_identifiers = _identifier_parse(next_identifier, quote_char)
                    further_identifiers.insert(0, first_identifier)
                else:
                    # Anything but a dot after the closing quote is invalid.
                    raise SQLParseError('User escaped identifiers must escape extra quotes')
            else:
                # The quoted fragment spans the whole identifier.
                further_identifiers = [identifier]
    if not already_quoted:
        try:
            dot = identifier.index('.')
        except ValueError:
            # No dot: quote the single fragment, doubling embedded quotes.
            identifier = identifier.replace(quote_char, quote_char * 2)
            identifier = ''.join((quote_char, identifier, quote_char))
            further_identifiers = [identifier]
        else:
            if dot == 0 or dot >= len(identifier) - 1:
                # Leading or trailing dot is part of the name, not a
                # separator: quote the whole thing as one fragment.
                identifier = identifier.replace(quote_char, quote_char * 2)
                identifier = ''.join((quote_char, identifier, quote_char))
                further_identifiers = [identifier]
            else:
                # Split on the first dot and recurse for the remainder.
                first_identifier = identifier[:dot]
                next_identifier = identifier[dot + 1:]
                further_identifiers = _identifier_parse(next_identifier, quote_char)
                first_identifier = first_identifier.replace(quote_char, quote_char * 2)
                first_identifier = ''.join((quote_char, first_identifier, quote_char))
                further_identifiers.insert(0, first_identifier)
    return further_identifiers
def pg_quote_identifier(identifier, id_type):
    """Quote a dotted PostgreSQL identifier with double quotes.

    ``id_type`` (e.g. 'table', 'column') bounds how many dot-separated
    levels are permitted, per _PG_IDENTIFIER_TO_DOT_LEVEL.
    Raises SQLParseError when the identifier has too many levels.
    """
    fragments = _identifier_parse(identifier, quote_char='"')
    max_depth = _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]
    if len(fragments) > max_depth:
        raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, max_depth))
    return '.'.join(fragments)
def mysql_quote_identifier(identifier, id_type):
    """Quote a dotted MySQL identifier with backticks.

    ``id_type`` (e.g. 'table', 'column') bounds how many dot-separated
    levels are permitted, per _MYSQL_IDENTIFIER_TO_DOT_LEVEL.
    Raises SQLParseError when the identifier has too many levels.
    """
    fragments = _identifier_parse(identifier, quote_char='`')
    # NOTE(review): unlike pg_quote_identifier this compares len - 1, which
    # allows one extra level -- looks intentional/legacy; confirm before
    # "fixing" the asymmetry.
    if (len(fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
        raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))
    # A quoted star (`*`) means "all columns" and must be emitted unquoted.
    return '.'.join('*' if fragment == '`*`' else fragment for fragment in fragments)
| gpl-3.0 |
beacloudgenius/edx-platform | common/lib/xmodule/xmodule/video_module/video_module.py | 13 | 30566 | # -*- coding: utf-8 -*-
# pylint: disable=abstract-method
"""Video is ungraded Xmodule for support video content.
It's new improved video module, which support additional feature:
- Can play non-YouTube video sources via in-browser HTML5 video player.
- YouTube defaults to HTML5 mode from the start.
- Speed changes in both YouTube and non-YouTube videos happen via
in-browser HTML5 video method (when in HTML5 mode).
- Navigational subtitles can be disabled altogether via an attribute
in XML.
Examples of html5 videos for manual testing:
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.mp4
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.webm
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.ogv
"""
import copy
import json
import logging
import random
from collections import OrderedDict
from operator import itemgetter
from lxml import etree
from pkg_resources import resource_string
from django.conf import settings
from xblock.fields import ScopeIds
from xblock.runtime import KvsFieldData
from xmodule.modulestore.inheritance import InheritanceKeyValueStore, own_metadata
from xmodule.x_module import XModule, module_attr
from xmodule.editing_module import TabsEditingDescriptor
from xmodule.raw_module import EmptyDataRawDescriptor
from xmodule.xml_module import is_pointer_tag, name_to_pathname, deserialize_field
from xmodule.exceptions import NotFoundError
from .transcripts_utils import VideoTranscriptsMixin
from .video_utils import create_youtube_string, get_video_from_cdn
from .video_xfields import VideoFields
from .video_handlers import VideoStudentViewHandlers, VideoStudioViewHandlers
from xmodule.video_module import manage_video_subtitles_save
# The following import/except block for edxval is temporary measure until
# edxval is a proper XBlock Runtime Service.
#
# Here's the deal: the VideoModule should be able to take advantage of edx-val
# (https://github.com/edx/edx-val) to figure out what URL to give for video
# resources that have an edx_video_id specified. edx-val is a Django app, and
# including it causes tests to fail because we run common/lib tests standalone
# without Django dependencies. The alternatives seem to be:
#
# 1. Move VideoModule out of edx-platform.
# 2. Accept the Django dependency in common/lib.
# 3. Try to import, catch the exception on failure, and check for the existence
# of edxval_api before invoking it in the code.
# 4. Make edxval an XBlock Runtime Service
#
# (1) is a longer term goal. VideoModule should be made into an XBlock and
# extracted from edx-platform entirely. But that's expensive to do because of
# the various dependencies (like templates). Need to sort this out.
# (2) is explicitly discouraged.
# (3) is what we're doing today. The code is still functional when called within
# the context of the LMS, but does not cause failure on import when running
# standalone tests. Most VideoModule tests tend to be in the LMS anyway,
# probably for historical reasons, so we're not making things notably worse.
# (4) is one of the next items on the backlog for edxval, and should get rid
# of this particular import silliness. It's just that I haven't made one before,
# and I was worried about trying it with my deadline constraints.
try:
import edxval.api as edxval_api
except ImportError:
edxval_api = None
try:
from branding.models import BrandingInfoConfig
except ImportError:
BrandingInfoConfig = None
log = logging.getLogger(__name__)
_ = lambda text: text
class VideoModule(VideoFields, VideoTranscriptsMixin, VideoStudentViewHandlers, XModule):
    """
    Student-facing video player XModule.

    XML source example:

        <video show_captions="true"
               youtube="0.75:jNCf2gIqpeE,1.0:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg"
               url_name="lecture_21_3" display_name="S19V3: Vacancies">
            <source src=".../mit-3091x/M-3091X-FA12-L21-3_100.mp4"/>
            <source src=".../mit-3091x/M-3091X-FA12-L21-3_100.webm"/>
            <source src=".../mit-3091x/M-3091X-FA12-L21-3_100.ogv"/>
        </video>
    """
    video_time = 0
    icon_class = 'video'

    # To make sure that js files are called in proper order we use numerical
    # index. We do that to avoid issues that occurs in tests.
    module = __name__.replace('.video_module', '', 2)
    js = {
        'js': [
            resource_string(module, 'js/src/video/00_component.js'),
            resource_string(module, 'js/src/video/00_video_storage.js'),
            resource_string(module, 'js/src/video/00_resizer.js'),
            resource_string(module, 'js/src/video/00_async_process.js'),
            resource_string(module, 'js/src/video/00_i18n.js'),
            resource_string(module, 'js/src/video/00_sjson.js'),
            resource_string(module, 'js/src/video/00_iterator.js'),
            resource_string(module, 'js/src/video/01_initialize.js'),
            resource_string(module, 'js/src/video/025_focus_grabber.js'),
            resource_string(module, 'js/src/video/02_html5_video.js'),
            resource_string(module, 'js/src/video/03_video_player.js'),
            resource_string(module, 'js/src/video/035_video_accessible_menu.js'),
            resource_string(module, 'js/src/video/04_video_control.js'),
            resource_string(module, 'js/src/video/05_video_quality_control.js'),
            resource_string(module, 'js/src/video/06_video_progress_slider.js'),
            resource_string(module, 'js/src/video/07_video_volume_control.js'),
            resource_string(module, 'js/src/video/08_video_speed_control.js'),
            resource_string(module, 'js/src/video/09_video_caption.js'),
            resource_string(module, 'js/src/video/095_video_context_menu.js'),
            resource_string(module, 'js/src/video/10_commands.js'),
            resource_string(module, 'js/src/video/10_main.js')
        ]
    }
    css = {'scss': [
        resource_string(module, 'css/video/display.scss'),
        resource_string(module, 'css/video/accessible_menu.scss'),
    ]}
    js_module_name = "Video"

    def get_transcripts_for_student(self):
        """Return transcript information necessary for rendering the XModule student view.

        This is more or less a direct extraction from `get_html`.

        Returns:
            Tuple of (track_url, transcript_language, sorted_languages)

            track_url -> subtitle download url (or None when downloads are off)
            transcript_language -> default transcript language code
            sorted_languages -> OrderedDict of available transcript languages,
                sorted by display label
        """
        track_url = None
        if self.download_track:
            if self.track:
                # An explicit track URL takes precedence over generated ones.
                track_url = self.track
            elif self.sub or self.transcripts:
                track_url = self.runtime.handler_url(self, 'transcript', 'download').rstrip('/?')

        if not self.transcripts:
            transcript_language = u'en'
            languages = {'en': 'English'}
        else:
            transcript_language = self.get_default_transcript_language()
            # Prefer the platform's native label for two-letter language codes,
            # falling back to the display name from ALL_LANGUAGES.
            native_languages = {lang: label for lang, label in settings.LANGUAGES if len(lang) == 2}
            languages = {
                lang: native_languages.get(lang, display)
                for lang, display in settings.ALL_LANGUAGES
                if lang in self.transcripts
            }
            if self.sub:
                languages['en'] = 'English'

        # OrderedDict for easy testing of rendered context in tests
        sorted_languages = sorted(languages.items(), key=itemgetter(1))
        if 'table' in self.transcripts:
            # 'table' is a pseudo-language used for a table-of-contents track;
            # it is always listed first.
            sorted_languages.insert(0, ('table', 'Table of Contents'))
        sorted_languages = OrderedDict(sorted_languages)

        return track_url, transcript_language, sorted_languages

    def get_html(self):
        """Build the template context and render the student view HTML for the player."""
        # The download format selector is irrelevant when a raw track URL is used.
        transcript_download_format = self.transcript_download_format if not (self.download_track and self.track) else None
        sources = filter(None, self.html5_sources)

        download_video_link = None
        branding_info = None
        youtube_streams = ""

        # If we have an edx_video_id, we prefer its values over what we store
        # internally for download links (source, html5_sources) and the youtube
        # stream.
        if self.edx_video_id and edxval_api:
            try:
                val_profiles = ["youtube", "desktop_webm", "desktop_mp4"]
                val_video_urls = edxval_api.get_urls_for_profiles(self.edx_video_id, val_profiles)

                # VAL will always give us the keys for the profiles we asked for, but
                # if it doesn't have an encoded video entry for that Video + Profile, the
                # value will map to `None`

                # add the non-youtube urls to the list of alternative sources
                # use the last non-None non-youtube url as the link to download the video
                for url in [val_video_urls[p] for p in val_profiles if p != "youtube"]:
                    if url:
                        if url not in sources:
                            sources.append(url)
                        if self.download_video:
                            download_video_link = url

                # set the youtube url
                if val_video_urls["youtube"]:
                    youtube_streams = "1.00:{}".format(val_video_urls["youtube"])
            except edxval_api.ValInternalError:
                # VAL raises this exception if it can't find data for the edx video ID. This can happen if the
                # course data is ported to a machine that does not have the VAL data. So for now, pass on this
                # exception and fallback to whatever we find in the VideoDescriptor.
                log.warning("Could not retrieve information from VAL for edx Video ID: %s.", self.edx_video_id)

        # If the user comes from China use China CDN for html5 videos.
        # 'CN' is China ISO 3166-1 country code.
        # Video caching is disabled for Studio. User_location is always None in Studio.
        # CountryMiddleware disabled for Studio.
        cdn_url = getattr(settings, 'VIDEO_CDN_URL', {}).get(self.system.user_location)
        if getattr(self, 'video_speed_optimizations', True) and cdn_url:
            # NOTE(review): BrandingInfoConfig may be None when the branding app
            # failed to import (see module-level try/except) -- this appears to
            # assume VIDEO_CDN_URL is only configured when branding is present.
            branding_info = BrandingInfoConfig.get_config().get(self.system.user_location)

            for index, source_url in enumerate(sources):
                new_url = get_video_from_cdn(cdn_url, source_url)
                if new_url:
                    sources[index] = new_url

        # If there was no edx_video_id, or if there was no download specified
        # for it, we fall back on whatever we find in the VideoDescriptor
        if not download_video_link and self.download_video:
            if self.source:
                download_video_link = self.source
            elif self.html5_sources:
                download_video_link = self.html5_sources[0]

        track_url, transcript_language, sorted_languages = self.get_transcripts_for_student()

        # CDN_VIDEO_URLS is only to be used here and will be deleted
        # TODO(ali@edx.org): Delete this after the CDN experiment has completed.
        html_id = self.location.html_id()
        if self.system.user_location == 'CN' and \
                settings.FEATURES.get('ENABLE_VIDEO_BEACON', False) and \
                html_id in getattr(settings, 'CDN_VIDEO_URLS', {}).keys():
            cdn_urls = getattr(settings, 'CDN_VIDEO_URLS', {})[html_id]
            # Randomly assign the learner to an experiment group; group 0 keeps
            # the original source, any other group substitutes the CDN source.
            cdn_exp_group, new_source = random.choice(zip(range(len(cdn_urls)), cdn_urls))
            if cdn_exp_group > 0:
                sources[0] = new_source
            cdn_eval = True
        else:
            cdn_eval = False
            cdn_exp_group = None

        return self.system.render_template('video.html', {
            'ajax_url': self.system.ajax_url + '/save_user_state',
            'autoplay': settings.FEATURES.get('AUTOPLAY_VIDEOS', False),
            'branding_info': branding_info,
            'cdn_eval': cdn_eval,
            'cdn_exp_group': cdn_exp_group,
            # This won't work when we move to data that
            # isn't on the filesystem
            'data_dir': getattr(self, 'data_dir', None),
            'display_name': self.display_name_with_default,
            'end': self.end_time.total_seconds(),
            'handout': self.handout,
            'id': self.location.html_id(),
            'show_captions': json.dumps(self.show_captions),
            'download_video_link': download_video_link,
            'sources': json.dumps(sources),
            'speed': json.dumps(self.speed),
            'general_speed': self.global_speed,
            'saved_video_position': self.saved_video_position.total_seconds(),
            'start': self.start_time.total_seconds(),
            'sub': self.sub,
            'track': track_url,
            'youtube_streams': youtube_streams or create_youtube_string(self),
            # TODO: Later on the value 1500 should be taken from some global
            # configuration setting field.
            'yt_test_timeout': 1500,
            'yt_api_url': settings.YOUTUBE['API'],
            'yt_test_url': settings.YOUTUBE['TEST_URL'],
            'transcript_download_format': transcript_download_format,
            'transcript_download_formats_list': self.descriptor.fields['transcript_download_format'].values,
            'transcript_language': transcript_language,
            'transcript_languages': json.dumps(sorted_languages),
            'transcript_translation_url': self.runtime.handler_url(self, 'transcript', 'translation').rstrip('/?'),
            'transcript_available_translations_url': self.runtime.handler_url(self, 'transcript', 'available_translations').rstrip('/?'),
        })
class VideoDescriptor(VideoFields, VideoTranscriptsMixin, VideoStudioViewHandlers, TabsEditingDescriptor, EmptyDataRawDescriptor):
    """
    Descriptor for `VideoModule`: Studio editing, XML import/export and
    backward-compatibility handling for legacy video fields.
    """
    module_class = VideoModule
    transcript = module_attr('transcript')

    # Studio editor tabs: "Basic" (transcripts UI) and "Advanced" (raw metadata).
    tabs = [
        {
            'name': _("Basic"),
            'template': "video/transcripts.html",
            'current': True
        },
        {
            'name': _("Advanced"),
            'template': "tabs/metadata-edit-tab.html"
        }
    ]

    def __init__(self, *args, **kwargs):
        """
        Mostly handles backward compatibility issues.

        `source` is a deprecated field.
        a) If `source` exists and `source` is not in `html5_sources`: show the
           `source` field on the front-end as non-editable but clearable. The
           dropdown is a new field `download_video` and it has value True.
        b) If `source` is cleared it is not shown anymore.
        c) If `source` exists and `source` is in `html5_sources`, do not show the
           `source` field. The `download_video` field has value True.
        """
        super(VideoDescriptor, self).__init__(*args, **kwargs)
        # For backwards compatibility -- if we've got XML data, parse it out and set the metadata fields
        if self.data:
            field_data = self._parse_video_xml(etree.fromstring(self.data))
            self._field_data.set_many(self, field_data)
            del self.data

        self.source_visible = False
        if self.source:
            # If `source` field value exist in the `html5_sources` field values,
            # then delete `source` field value and use value from `html5_sources` field.
            if self.source in self.html5_sources:
                self.source = ''  # Delete source field value.
                self.download_video = True
            else:  # Otherwise, `source` field value will be used.
                self.source_visible = True
                if not self.fields['download_video'].is_set_on(self):
                    self.download_video = True

        # Set download_video field to default value if its not explicitly set for backward compatibility.
        # (Assigning the field to itself forces it to be recorded as explicitly set.)
        if not self.fields['download_video'].is_set_on(self):
            self.download_video = self.download_video

        # for backward compatibility.
        # If course existed and was not re-imported by the moment of adding the
        # `download_track` field, we should enable `download_track` if a track exists:
        if not self.fields['download_track'].is_set_on(self) and self.track:
            self.download_track = True

    def editor_saved(self, user, old_metadata, old_content):
        """
        Used to update video values during the `self`:save method from CMS.

        old_metadata: dict, values of fields of `self` with scope=settings which
            were explicitly set by the user.
        old_content: same as `old_metadata` but for scope=content.

        Due to the nature of the code flow in item.py::_save_item, before this
        function is called the fields of the `self` instance have already been
        updated, but not yet saved. To obtain the values changed by user input,
        one should compare own_metadata(self) and old_metadata.

        The video player has two tabs, and due to the nature of sync between
        tabs, metadata from the Basic tab is always sent when the video player
        is edited and saved the first time, for example:
        {'youtube_id_1_0': u'3_yD_cEKoCk', 'display_name': u'Video', 'sub': u'3_yD_cEKoCk', 'html5_sources': []},
        that's why these fields will always be present in old_metadata after the
        first save. This should be fixed.

        On subsequent save requests html5_sources are always sent too,
        regardless of whether the user changed them. That means html5_sources
        are always in the list of fields that were changed (`metadata` param in
        save_item). This should be fixed too.
        """
        metadata_was_changed_by_user = old_metadata != own_metadata(self)
        if metadata_was_changed_by_user:
            manage_video_subtitles_save(
                self,
                user,
                old_metadata if old_metadata else None,
                generate_translation=True
            )

    def save_with_metadata(self, user):
        """
        Save module with updated metadata to database.
        """
        self.save()
        self.runtime.modulestore.update_item(self, user.id)

    @property
    def editable_metadata_fields(self):
        """Shape the editable-field dict for the Studio editor (transcripts
        widget config, handout uploader, legacy `source` visibility)."""
        editable_fields = super(VideoDescriptor, self).editable_metadata_fields

        if self.source_visible:
            # Legacy `source` is shown read-only (clearable) -- see __init__.
            editable_fields['source']['non_editable'] = True
        else:
            editable_fields.pop('source')

        languages = [{'label': label, 'code': lang} for lang, label in settings.ALL_LANGUAGES if lang != u'en']
        languages.sort(key=lambda l: l['label'])
        # 'table' is the pseudo-language for a table-of-contents transcript.
        languages.insert(0, {'label': 'Table of Contents', 'code': 'table'})
        editable_fields['transcripts']['languages'] = languages
        editable_fields['transcripts']['type'] = 'VideoTranslations'
        editable_fields['transcripts']['urlRoot'] = self.runtime.handler_url(self, 'studio_transcript', 'translation').rstrip('/?')
        editable_fields['handout']['type'] = 'FileUploader'

        return editable_fields

    @classmethod
    def from_xml(cls, xml_data, system, id_generator):
        """
        Creates an instance of this descriptor from the supplied xml_data.
        This may be overridden by subclasses.

        xml_data: A string of xml that will be translated into data and children
            for this module
        system: A DescriptorSystem for interacting with external resources
        id_generator: used to generate course-specific urls and identifiers
        """
        xml_object = etree.fromstring(xml_data)
        url_name = xml_object.get('url_name', xml_object.get('slug'))

        block_type = 'video'
        definition_id = id_generator.create_definition(block_type, url_name)
        usage_id = id_generator.create_usage(definition_id)

        if is_pointer_tag(xml_object):
            # Pointer tags reference a separate XML file on disk; load it.
            filepath = cls._format_filepath(xml_object.tag, name_to_pathname(url_name))
            xml_object = cls.load_file(filepath, system.resources_fs, usage_id)
            system.parse_asides(xml_object, definition_id, usage_id, id_generator)

        field_data = cls._parse_video_xml(xml_object, id_generator)
        kvs = InheritanceKeyValueStore(initial_values=field_data)
        field_data = KvsFieldData(kvs)
        video = system.construct_xblock_from_class(
            cls,
            # We're loading a descriptor, so student_id is meaningless
            # We also don't have separate notions of definition and usage ids yet,
            # so we use the location for both
            ScopeIds(None, block_type, definition_id, usage_id),
            field_data,
        )
        return video

    def definition_to_xml(self, resource_fs):
        """
        Returns an xml string representing this module.
        """
        xml = etree.Element('video')
        youtube_string = create_youtube_string(self)
        # Mild workaround to ensure that tests pass -- if a field
        # is set to its default value, we don't need to write it out.
        if youtube_string and youtube_string != '1.00:3_yD_cEKoCk':
            xml.set('youtube', unicode(youtube_string))
        xml.set('url_name', self.url_name)
        attrs = {
            'display_name': self.display_name,
            'show_captions': json.dumps(self.show_captions),
            'start_time': self.start_time,
            'end_time': self.end_time,
            'sub': self.sub,
            'download_track': json.dumps(self.download_track),
            'download_video': json.dumps(self.download_video),
        }
        for key, value in attrs.items():
            # Mild workaround to ensure that tests pass -- if a field
            # is set to its default value, we don't write it out.
            if value:
                if key in self.fields and self.fields[key].is_set_on(self):
                    xml.set(key, unicode(value))

        for source in self.html5_sources:
            ele = etree.Element('source')
            ele.set('src', source)
            xml.append(ele)

        if self.track:
            ele = etree.Element('track')
            ele.set('src', self.track)
            xml.append(ele)

        if self.handout:
            ele = etree.Element('handout')
            ele.set('src', self.handout)
            xml.append(ele)

        # sorting for easy testing of resulting xml
        for transcript_language in sorted(self.transcripts.keys()):
            ele = etree.Element('transcript')
            ele.set('language', transcript_language)
            ele.set('src', self.transcripts[transcript_language])
            xml.append(ele)

        if self.edx_video_id and edxval_api:
            try:
                # Embed the VAL data so an export/import round-trip keeps it.
                xml.append(edxval_api.export_to_xml(self.edx_video_id))
            except edxval_api.ValVideoNotFoundError:
                pass

        return xml

    def get_context(self):
        """
        Extend context by data for transcript basic tab.
        """
        _context = super(VideoDescriptor, self).get_context()

        metadata_fields = copy.deepcopy(self.editable_metadata_fields)

        display_name = metadata_fields['display_name']
        video_url = metadata_fields['html5_sources']
        youtube_id_1_0 = metadata_fields['youtube_id_1_0']

        def get_youtube_link(video_id):
            """Return a youtu.be URL for `video_id`, preferring the VAL entry."""
            # First try a lookup in VAL. If we have a YouTube entry there, it overrides the
            # one passed in.
            if self.edx_video_id and edxval_api:
                val_youtube_id = edxval_api.get_url_for_profile(self.edx_video_id, "youtube")
                if val_youtube_id:
                    video_id = val_youtube_id

            if video_id:
                return 'http://youtu.be/{0}'.format(video_id)
            else:
                return ''

        _ = self.runtime.service(self, "i18n").ugettext
        video_url.update({
            'help': _('The URL for your video. This can be a YouTube URL or a link to an .mp4, .ogg, or .webm video file hosted elsewhere on the Internet.'),
            'display_name': _('Default Video URL'),
            'field_name': 'video_url',
            'type': 'VideoList',
            'default_value': [get_youtube_link(youtube_id_1_0['default_value'])]
        })

        youtube_id_1_0_value = get_youtube_link(youtube_id_1_0['value'])

        if youtube_id_1_0_value:
            video_url['value'].insert(0, youtube_id_1_0_value)

        metadata = {
            'display_name': display_name,
            'video_url': video_url
        }

        _context.update({'transcripts_basic_tab_metadata': metadata})

        return _context

    @classmethod
    def _parse_youtube(cls, data):
        """
        Parses a string of Youtube IDs such as "1.0:AXdE34_U,1.5:VO3SxfeD"
        into a dictionary. Necessary for backwards compatibility with
        XML-based courses.
        """
        ret = {'0.75': '', '1.00': '', '1.25': '', '1.50': ''}

        videos = data.split(',')
        for video in videos:
            pieces = video.split(':')
            try:
                speed = '%.2f' % float(pieces[0])  # normalize speed
                # Handle the fact that youtube IDs got double-quoted for a period of time.
                # Note: we pass in "VideoFields.youtube_id_1_0" so we deserialize as a String--
                # it doesn't matter what the actual speed is for the purposes of deserializing.
                youtube_id = deserialize_field(cls.youtube_id_1_0, pieces[1])
                ret[speed] = youtube_id
            except (ValueError, IndexError):
                log.warning('Invalid YouTube ID: %s', video)
        return ret

    @classmethod
    def _parse_video_xml(cls, xml, id_generator=None):
        """
        Parse video fields out of xml_data. The fields are set if they are
        present in the XML.

        Arguments:
            id_generator is used to generate course-specific urls and identifiers
        """
        field_data = {}

        # Convert between key types for certain attributes --
        # necessary for backwards compatibility.
        conversions = {
            # example: 'start_time': cls._example_convert_start_time
        }

        # Convert between key names for certain attributes --
        # necessary for backwards compatibility.
        compat_keys = {
            'from': 'start_time',
            'to': 'end_time'
        }

        sources = xml.findall('source')
        if sources:
            field_data['html5_sources'] = [ele.get('src') for ele in sources]

        track = xml.find('track')
        if track is not None:
            field_data['track'] = track.get('src')

        handout = xml.find('handout')
        if handout is not None:
            field_data['handout'] = handout.get('src')

        transcripts = xml.findall('transcript')
        if transcripts:
            field_data['transcripts'] = {tr.get('language'): tr.get('src') for tr in transcripts}

        for attr, value in xml.items():
            if attr in compat_keys:
                attr = compat_keys[attr]
            if attr in cls.metadata_to_strip + ('url_name', 'name'):
                continue
            if attr == 'youtube':
                speeds = cls._parse_youtube(value)
                for speed, youtube_id in speeds.items():
                    # should have made these youtube_id_1_00 for
                    # cleanliness, but hindsight doesn't need glasses
                    normalized_speed = speed[:-1] if speed.endswith('0') else speed
                    # If the user has specified html5 sources, make sure we don't use the default video
                    if youtube_id != '' or 'html5_sources' in field_data:
                        field_data['youtube_id_{0}'.format(normalized_speed.replace('.', '_'))] = youtube_id
            elif attr in conversions:
                field_data[attr] = conversions[attr](value)
            elif attr not in cls.fields:
                # Unknown attributes are preserved round-trip in xml_attributes.
                field_data.setdefault('xml_attributes', {})[attr] = value
            else:
                # We export values with json.dumps (well, except for Strings, but
                # for about a month we did it for Strings also).
                field_data[attr] = deserialize_field(cls.fields[attr], value)

        # For backwards compatibility: Add `source` if XML doesn't have `download_video`
        # attribute.
        if 'download_video' not in field_data and sources:
            field_data['source'] = field_data['html5_sources'][0]

        # For backwards compatibility: if XML doesn't have `download_track` attribute,
        # it means that it is an old format. So, if `track` has some value,
        # `download_track` needs to have value `True`.
        if 'download_track' not in field_data and track is not None:
            field_data['download_track'] = True

        video_asset_elem = xml.find('video_asset')
        if (
                edxval_api and
                video_asset_elem is not None and
                'edx_video_id' in field_data
        ):
            # Allow ValCannotCreateError to escape
            edxval_api.import_from_xml(
                video_asset_elem,
                field_data['edx_video_id'],
                course_id=getattr(id_generator, 'target_course_id', None)
            )

        return field_data

    def index_dictionary(self):
        """Return the search-index document for this video: display name plus
        any transcripts that can be fetched (missing ones are skipped)."""
        xblock_body = super(VideoDescriptor, self).index_dictionary()
        video_body = {
            "display_name": self.display_name,
        }

        def _update_transcript_for_index(language=None):
            """ Find video transcript - if not found, don't update index """
            try:
                transcript = self.get_transcript(transcript_format='txt', lang=language)[0].replace("\n", " ")
                transcript_index_name = "transcript_{}".format(language if language else self.transcript_language)
                video_body.update({transcript_index_name: transcript})
            except NotFoundError:
                pass

        if self.sub:
            _update_transcript_for_index()

        # check to see if there are transcripts in other languages besides default transcript
        if self.transcripts:
            for language in self.transcripts.keys():
                _update_transcript_for_index(language)

        if "content" in xblock_body:
            xblock_body["content"].update(video_body)
        else:
            xblock_body["content"] = video_body
        xblock_body["content_type"] = "Video"

        return xblock_body
| agpl-3.0 |
FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/test/test_shelve.py | 8 | 6389 | import unittest
import shelve
import glob
from test import support
from collections.abc import MutableMapping
from test.test_dbm import dbm_iterator
def L1(s):
    """Decode *s* (bytes) to text using the latin-1 codec."""
    return str(s, "latin-1")
class byteskeydict(MutableMapping):
    """Mapping that supports bytes keys.

    Internally the keys are stored latin-1 decoded in a plain dict, and
    re-encoded to bytes on the way out.
    """

    def __init__(self, d=None):
        # Bug fix: copy() used to call byteskeydict(self.d) even though
        # __init__ accepted no arguments, so copying always raised TypeError.
        # Accept an optional dict of already-decoded (str) keys and copy it;
        # calling with no argument still yields an empty mapping.
        self.d = dict(d) if d is not None else {}

    def __getitem__(self, key):
        # Keys arrive as bytes; the backing dict is keyed by latin-1 text.
        return self.d[key.decode("latin-1")]

    def __setitem__(self, key, value):
        self.d[key.decode("latin-1")] = value

    def __delitem__(self, key):
        del self.d[key.decode("latin-1")]

    def __len__(self):
        return len(self.d)

    def iterkeys(self):
        # Yield keys re-encoded to bytes, matching what callers stored.
        for k in self.d.keys():
            yield k.encode("latin-1")

    __iter__ = iterkeys

    def keys(self):
        # Deliberately returns a list (not a view) of bytes keys.
        return list(self.iterkeys())

    def copy(self):
        # Shallow copy: the key dict is duplicated, values are shared.
        return byteskeydict(self.d)
class TestCase(unittest.TestCase):
    """Direct behavioural tests of shelve.Shelf and shelve.open."""

    # Base name for on-disk shelves; dbm backends may create several files
    # with this prefix (e.g. .dir/.dat/.bak), hence the glob in tearDown.
    fn = "shelftemp.db"

    def tearDown(self):
        for f in glob.glob(self.fn+"*"):
            support.unlink(f)

    def test_close(self):
        d1 = {}
        s = shelve.Shelf(d1, protocol=2, writeback=False)
        s['key1'] = [1,2,3,4]
        self.assertEqual(s['key1'], [1,2,3,4])
        self.assertEqual(len(s), 1)
        s.close()
        # A closed shelf must refuse any further access.
        self.assertRaises(ValueError, len, s)
        try:
            s['key1']
        except ValueError:
            pass
        else:
            self.fail('Closed shelf should not find a key')

    def test_ascii_file_shelf(self):
        s = shelve.open(self.fn, protocol=0)
        try:
            s['key1'] = (1,2,3,4)
            self.assertEqual(s['key1'], (1,2,3,4))
        finally:
            s.close()

    def test_binary_file_shelf(self):
        s = shelve.open(self.fn, protocol=1)
        try:
            s['key1'] = (1,2,3,4)
            self.assertEqual(s['key1'], (1,2,3,4))
        finally:
            s.close()

    def test_proto2_file_shelf(self):
        s = shelve.open(self.fn, protocol=2)
        try:
            s['key1'] = (1,2,3,4)
            self.assertEqual(s['key1'], (1,2,3,4))
        finally:
            s.close()

    def test_in_memory_shelf(self):
        d1 = byteskeydict()
        s = shelve.Shelf(d1, protocol=0)
        s['key1'] = (1,2,3,4)
        self.assertEqual(s['key1'], (1,2,3,4))
        s.close()
        d2 = byteskeydict()
        s = shelve.Shelf(d2, protocol=1)
        s['key1'] = (1,2,3,4)
        self.assertEqual(s['key1'], (1,2,3,4))
        s.close()

        self.assertEqual(len(d1), 1)
        self.assertEqual(len(d2), 1)
        # Different pickle protocols must produce different stored bytes.
        self.assertNotEqual(d1.items(), d2.items())

    def test_mutable_entry(self):
        d1 = byteskeydict()
        s = shelve.Shelf(d1, protocol=2, writeback=False)
        s['key1'] = [1,2,3,4]
        self.assertEqual(s['key1'], [1,2,3,4])
        # Without writeback, mutating the retrieved copy is lost.
        s['key1'].append(5)
        self.assertEqual(s['key1'], [1,2,3,4])
        s.close()

        d2 = byteskeydict()
        s = shelve.Shelf(d2, protocol=2, writeback=True)
        s['key1'] = [1,2,3,4]
        self.assertEqual(s['key1'], [1,2,3,4])
        # With writeback=True, the cache keeps the mutation visible.
        s['key1'].append(5)
        self.assertEqual(s['key1'], [1,2,3,4,5])
        s.close()

        self.assertEqual(len(d1), 1)
        self.assertEqual(len(d2), 1)

    def test_keyencoding(self):
        d = {}
        key = 'Pöp'
        # the default keyencoding is utf-8
        shelve.Shelf(d)[key] = [1]
        self.assertIn(key.encode('utf-8'), d)
        # but a different one can be given
        shelve.Shelf(d, keyencoding='latin-1')[key] = [1]
        self.assertIn(key.encode('latin-1'), d)
        # with all consequences
        s = shelve.Shelf(d, keyencoding='ascii')
        self.assertRaises(UnicodeEncodeError, s.__setitem__, key, [1])

    def test_writeback_also_writes_immediately(self):
        # Issue 5754
        d = {}
        key = 'key'
        encodedkey = key.encode('utf-8')
        s = shelve.Shelf(d, writeback=True)
        s[key] = [1]
        p1 = d[encodedkey]  # Will give a KeyError if backing store not updated
        s['key'].append(2)
        s.close()
        p2 = d[encodedkey]
        self.assertNotEqual(p1, p2)  # Write creates new object in store

    def test_with(self):
        d1 = {}
        with shelve.Shelf(d1, protocol=2, writeback=False) as s:
            s['key1'] = [1,2,3,4]
            self.assertEqual(s['key1'], [1,2,3,4])
            self.assertEqual(len(s), 1)
        # Leaving the `with` block must close the shelf.
        self.assertRaises(ValueError, len, s)
        try:
            s['key1']
        except ValueError:
            pass
        else:
            self.fail('Closed shelf should not find a key')

    def test_default_protocol(self):
        with shelve.Shelf({}) as s:
            self.assertEqual(s._protocol, 3)
from test import mapping_tests
class TestShelveBase(mapping_tests.BasicTestMappingProtocol):
    """Run the generic mapping-protocol test suite against shelve.Shelf.

    Subclasses set `_args` (kwargs for Shelf/open) and `_in_mem` (backing
    store: in-memory byteskeydict vs. on-disk dbm file).
    """

    fn = "shelftemp.db"
    # Class-level counter so each on-disk mapping gets a unique filename.
    counter = 0

    def __init__(self, *args, **kw):
        # Track every shelf opened by _empty_mapping so tearDown can close it.
        self._db = []
        mapping_tests.BasicTestMappingProtocol.__init__(self, *args, **kw)

    type2test = shelve.Shelf

    def _reference(self):
        # Sample data required by BasicTestMappingProtocol.
        return {"key1": "value1", "key2": 2, "key3": (1, 2, 3)}

    def _empty_mapping(self):
        if self._in_mem:
            x = shelve.Shelf(byteskeydict(), **self._args)
        else:
            self.counter += 1
            x = shelve.open(self.fn + str(self.counter), **self._args)
        self._db.append(x)
        return x

    def tearDown(self):
        for db in self._db:
            db.close()
        self._db = []
        if not self._in_mem:
            # Remove every backing file the dbm layer created.
            for f in glob.glob(self.fn + "*"):
                support.unlink(f)
class TestAsciiFileShelve(TestShelveBase):
    # File-backed shelf using pickle protocol 0 (ASCII).
    _args = {'protocol': 0}
    _in_mem = False
class TestBinaryFileShelve(TestShelveBase):
    # File-backed shelf using pickle protocol 1 (binary).
    _args = {'protocol': 1}
    _in_mem = False
class TestProto2FileShelve(TestShelveBase):
    # File-backed shelf using pickle protocol 2.
    _args = {'protocol': 2}
    _in_mem = False
class TestAsciiMemShelve(TestShelveBase):
    # In-memory (byteskeydict-backed) shelf using pickle protocol 0.
    _args = {'protocol': 0}
    _in_mem = True
class TestBinaryMemShelve(TestShelveBase):
    # In-memory (byteskeydict-backed) shelf using pickle protocol 1.
    _args = {'protocol': 1}
    _in_mem = True
class TestProto2MemShelve(TestShelveBase):
    # In-memory (byteskeydict-backed) shelf using pickle protocol 2.
    _args = {'protocol': 2}
    _in_mem = True
def test_main():
    """Run every shelf suite once per available dbm backend."""
    # `module` is intentionally unused here: iterating dbm_iterator() switches
    # the default dbm backend as a side effect for each pass -- TODO confirm.
    for module in dbm_iterator():
        support.run_unittest(
            TestAsciiFileShelve,
            TestBinaryFileShelve,
            TestProto2FileShelve,
            TestAsciiMemShelve,
            TestBinaryMemShelve,
            TestProto2MemShelve,
            TestCase
        )

if __name__ == "__main__":
    test_main()
| gpl-2.0 |
pilou-/ansibullbot | ansibullbot/utils/file_tools.py | 1 | 17290 | #!/usr/bin/env python
import copy
import logging
import os
import re
from fuzzywuzzy import fuzz as fw_fuzz
from textblob import TextBlob
from ansibullbot.parsers.botmetadata import BotMetadataParser
from ansibullbot.utils.systemtools import run_command
from ansibullbot.utils.moduletools import ModuleIndexer
import ansibullbot.constants as C
class FileIndexer(ModuleIndexer):
    """Index the files of an ansible/ansible checkout and map them to BOTMETA
    metadata (labels, maintainers, keywords)."""

    REPO = 'https://github.com/ansible/ansible'

    # Template for one component-match result; deep-copied per file so the
    # mutable lists are never shared between matches.
    DEFAULT_COMPONENT_MATCH = {
        'supported_by': 'core',
        'filename': None,
        'labels': [],
        'owners': [],
        'notify': []
    }

    # NOTE(review): class-level mutable attribute -- replaced wholesale by
    # get_files(), never mutated in place, so sharing appears intentional.
    files = []
    def __init__(self, botmetafile=None, checkoutdir=None, repo=None):
        """Set up cache paths and build the initial index.

        botmetafile: optional local BOTMETA.yml path overriding the checkout's copy.
        checkoutdir: optional cache root; the checkout goes in a subdirectory of it.
        repo: optional 'owner/name' string overriding the default ansible repo.
        """
        if checkoutdir is None:
            self.checkoutdir = '~/.ansibullbot/cache/ansible.files.checkout'
        else:
            self.checkoutdir = os.path.join(checkoutdir, 'ansible.files.checkout')
        self.checkoutdir = os.path.expanduser(self.checkoutdir)

        if repo:
            self.REPO = 'https://github.com/{}'.format(repo)

        self.botmetafile = botmetafile
        self.botmeta = {}
        self.CMAP = {}      # keyword -> list of botmeta file keys
        self.FILEMAP = {}
        self.match_cache = {}
        # update() refreshes the git checkout and re-parses metadata
        # (inherited behavior -- TODO confirm against ModuleIndexer).
        self.update(force=True)
        self.email_commits = {}
    def parse_metadata(self):
        """Load BOTMETA.yml (local override or checkout copy), rebuild the
        keyword map (CMAP), then refresh the file list and file map."""
        if self.botmetafile is not None:
            with open(self.botmetafile, 'rb') as f:
                rdata = f.read()
        else:
            fp = '.github/BOTMETA.yml'
            rdata = self.get_file_content(fp)
        if rdata:
            self.botmeta = BotMetadataParser.parse_yaml(rdata)
        else:
            self.botmeta = {}

        # reshape meta into old format: invert file->keywords into
        # keyword -> [file keys], de-duplicated.
        self.CMAP = {}
        for k, v in self.botmeta.get('files', {}).items():
            if not v:
                continue
            if 'keywords' not in v:
                continue
            for keyword in v['keywords']:
                if keyword not in self.CMAP:
                    self.CMAP[keyword] = []
                if k not in self.CMAP[keyword]:
                    self.CMAP[keyword].append(k)

        # update the data
        self.get_files()
        self.get_filemap()
def get_files(self):
cmd = 'find %s' % self.checkoutdir
(rc, so, se) = run_command(cmd)
files = so.split('\n')
files = [x.strip() for x in files if x.strip()]
files = [x.replace(self.checkoutdir + '/', '') for x in files]
files = [x for x in files if not x.startswith('.git')]
self.files = files
def get_component_labels(self, files, valid_labels=[]):
'''Matches a filepath to the relevant c: labels'''
labels = [x for x in valid_labels if x.startswith('c:')]
clabels = []
for cl in labels:
cl = cl.replace('c:', '', 1)
al = os.path.join('lib/ansible', cl)
if al.endswith('/'):
al = al.rstrip('/')
for f in files:
if not f:
continue
if f.startswith(cl) or f.startswith(al):
clabels.append(cl)
# use the more specific labels
clabels = sorted(set(clabels))
tmp_clabels = [x for x in clabels]
for cl in clabels:
for x in tmp_clabels:
if cl != x and x.startswith(cl):
if cl in tmp_clabels:
tmp_clabels.remove(cl)
if tmp_clabels != clabels:
clabels = [x for x in tmp_clabels]
clabels = sorted(set(clabels))
# Use botmeta
ckeys = self._filenames_to_keys(files)
for ckey in ckeys:
if not self.botmeta['files'].get(ckey):
continue
ckey_labels = self.botmeta['files'][ckey].get('labels', [])
for cklabel in ckey_labels:
if cklabel in valid_labels and cklabel not in clabels:
clabels.append(cklabel)
return clabels
def _filenames_to_keys(self, filenames):
'''Match filenames to the keys in botmeta'''
ckeys = []
for filen in filenames:
# Use botmeta
if filen in self.botmeta['files']:
if filen not in ckeys:
ckeys.append(filen)
else:
for key in self.botmeta['files'].keys():
if filen.startswith(key):
ckeys.append(key)
return ckeys
def _string_to_cmap_key(self, text):
text = text.lower()
matches = []
if text.endswith('.'):
text = text.rstrip('.')
if text in self.CMAP:
matches += self.CMAP[text]
return matches
elif (text + 's') in self.CMAP:
matches += self.CMAP[text + 's']
return matches
elif text.rstrip('s') in self.CMAP:
matches += self.CMAP[text.rstrip('s')]
return matches
return matches
def get_keywords_for_file(self, filename):
    """Return every component-map keyword whose file list contains *filename*.

    Inverse lookup over self.CMAP: keywords are returned in map order.
    """
    return [keyword for keyword, paths in self.CMAP.items()
            if filename in paths]
def find_component_matches_by_file(self, filenames):
    '''Make a list of component matches based on filenames'''
    matches = []
    for filen in filenames:
        # Start from a deep copy of the template so each match carries the
        # full schema without sharing mutable lists between matches.
        match = copy.deepcopy(self.DEFAULT_COMPONENT_MATCH)
        match['filename'] = filen
        ckeys = self._filenames_to_keys([filen])
        ckeys = sorted(set(ckeys))
        for ckey in ckeys:
            cdata = self.botmeta['files'].get(ckey)
            if not cdata:
                continue
            # Merge labels/owners/notifiees from every matching botmeta key,
            # preserving first-seen order and avoiding duplicates.
            if 'labels' in cdata:
                for label in cdata['labels']:
                    if label not in match['labels']:
                        match['labels'].append(label)
            if 'support' in cdata:
                # NOTE(review): only the first support value is used — last
                # matching key wins if several keys declare support; confirm
                # botmeta 'support' is list-like here.
                match['supported_by'] = cdata['support'][0]
            if 'maintainers' in cdata:
                for user in cdata['maintainers']:
                    if user not in match['owners']:
                        match['owners'].append(user)
            if 'notify' in cdata:
                for user in cdata['notify']:
                    if user not in match['notify']:
                        match['notify'].append(user)
        matches.append(match)
    return matches
def find_component_match(self, title, body, template_data):
    '''Make a list of matching files for arbitrary text in an issue'''
    # DistributionNotFound: The 'jinja2<2.9' distribution was not found and
    # is required by ansible
    # File
    # "/usr/lib/python2.7/site-packages/ansible/plugins/callback/foreman.py",
    # line 30, in <module>
    STOPWORDS = ['ansible', 'core', 'plugin']
    STOPCHARS = ['"', "'", '(', ')', '?', '*', '`', ',']
    matches = []
    # Phase 1: if the body contains a traceback, try to recover the file
    # path directly from the "File ..." frames.
    if 'Traceback (most recent call last)' in body:
        lines = body.split('\n')
        for line in lines:
            line = line.strip()
            if line.startswith('DistributionNotFound'):
                matches = ['setup.py']
                break
            elif line.startswith('File'):
                fn = line.split()[1]
                for SC in STOPCHARS:
                    fn = fn.replace(SC, '')
                if 'ansible_module_' in fn:
                    # Temp module file: strip the wrapper prefix.
                    fn = os.path.basename(fn)
                    fn = fn.replace('ansible_module_', '')
                    matches = [fn]
                elif 'cli/playbook.py' in fn:
                    fn = 'lib/ansible/cli/playbook.py'
                elif 'module_utils' in fn:
                    idx = fn.find('module_utils/')
                    fn = 'lib/ansible/' + fn[idx:]
                elif 'ansible/' in fn:
                    # Normalize an installed-path frame to a repo path.
                    idx = fn.find('ansible/')
                    fn1 = fn[idx:]
                    if 'bin/' in fn1:
                        if not fn1.startswith('bin'):
                            idx = fn1.find('bin/')
                            fn1 = fn1[idx:]
                        if fn1.endswith('.py'):
                            # NOTE(review): rstrip('.py') strips any trailing
                            # 'p'/'y'/'.' chars, not just the suffix.
                            fn1 = fn1.rstrip('.py')
                    elif 'cli/' in fn1:
                        idx = fn1.find('cli/')
                        fn1 = fn1[idx:]
                        fn1 = 'lib/ansible/' + fn1
                    elif 'lib' not in fn1:
                        fn1 = 'lib/' + fn1
                    if fn1 not in self.files:
                        if C.DEFAULT_BREAKPOINTS:
                            logging.error('breakpoint!')
                            import epdb; epdb.st()
    if matches:
        return matches
    craws = template_data.get('component_raw')
    if craws is None:
        return matches
    # compare to component mapping
    matches = self._string_to_cmap_key(craws)
    if matches:
        return matches
    # do not re-process the same strings over and over again
    if craws.lower() in self.match_cache:
        return self.match_cache[craws.lower()]
    # make ngrams from largest to smallest and recheck
    blob = TextBlob(craws.lower())
    wordcount = len(blob.tokens) + 1
    for ng_size in reversed(xrange(2, wordcount)):
        ngrams = [' '.join(x) for x in blob.ngrams(ng_size)]
        for ng in ngrams:
            matches = self._string_to_cmap_key(ng)
            if matches:
                self.match_cache[craws.lower()] = matches
                return matches
    # https://pypi.python.org/pypi/fuzzywuzzy
    # Fuzzy match each raw line against map keys; accept near-exact hits.
    matches = []
    for cr in craws.lower().split('\n'):
        ratios = []
        for k in self.CMAP.keys():
            ratio = fw_fuzz.ratio(cr, k)
            ratios.append((ratio, k))
        ratios = sorted(ratios, key=lambda tup: tup[0])
        if ratios[-1][0] >= 90:
            cnames = self.CMAP[ratios[-1][1]]
            matches += cnames
    if matches:
        self.match_cache[craws.lower()] = matches
        return matches
    # try to match to repo files
    if craws:
        clines = craws.split('\n')
        for craw in clines:
            # Tokenize the raw component text, scrub punctuation and
            # stopwords, and force a path-ish "/token" form.
            cparts = craw.replace('-', ' ')
            cparts = cparts.split()
            for idx, x in enumerate(cparts):
                for SC in STOPCHARS:
                    if SC in x:
                        x = x.replace(SC, '')
                for SW in STOPWORDS:
                    if x == SW:
                        x = ''
                if x and '/' not in x:
                    x = '/' + x
                cparts[idx] = x
            cparts = [x.strip() for x in cparts if x.strip()]
            for x in cparts:
                for f in self.files:
                    # Heuristic filters to cut obviously-wrong candidates.
                    if '/modules/' in f:
                        continue
                    if 'test/' in f and 'test' not in craw:
                        continue
                    if 'galaxy' in f and 'galaxy' not in body:
                        continue
                    if 'dynamic inv' in body.lower() and 'contrib' not in f:
                        continue
                    if 'inventory' in f and 'inventory' not in body.lower():
                        continue
                    if 'contrib' in f and 'inventory' not in body.lower():
                        continue
                    try:
                        # Probe only: skips paths that are not decodable
                        # (py2 bytes vs unicode); result is discarded.
                        f.endswith(x)
                    except UnicodeDecodeError:
                        continue
                    fname = os.path.basename(f).split('.')[0]
                    if f.endswith(x):
                        if fname.lower() in body.lower():
                            matches.append(f)
                            break
                    if f.endswith(x + '.py'):
                        if fname.lower() in body.lower():
                            matches.append(f)
                            break
                    if f.endswith(x + '.ps1'):
                        if fname.lower() in body.lower():
                            matches.append(f)
                            break
                    if os.path.dirname(f).endswith(x):
                        if fname.lower() in body.lower():
                            matches.append(f)
                            break
    logging.info('%s --> %s' % (craws, sorted(set(matches))))
    self.match_cache[craws.lower()] = matches
    return matches
def get_filemap(self):
    '''Read filemap and make re matchers.

    Builds self.FILEMAP from botmeta['files']: each entry gets a compiled
    regex plus merged assign/notify/labels lists.  Fixes two defects in the
    previous version: a dead ``self.FILEMAP[k] = {}`` assignment that was
    immediately overwritten, and in-place ``+=`` on lists aliased from
    botmeta, which silently mutated ``self.botmeta`` itself.
    '''
    self.FILEMAP = {}
    bfiles = self.botmeta.get('files', {})
    for k, v in bfiles.items():
        reg = k
        if reg.endswith('/'):
            # NOTE(review): appending '*' makes a glob-looking pattern, but
            # it is compiled as a regex below ('/*' == zero-or-more '/').
            # Kept as-is to preserve existing matching behavior.
            reg += '*'
        self.FILEMAP[k] = {
            'inclusive': True,
            'exclusive': False,
            'assign': [],
            'notify': [],
            'labels': []
        }
        self.FILEMAP[k]['regex'] = re.compile(reg)
        if not v:
            continue
        if 'maintainers' in v:
            self.FILEMAP[k]['maintainers'] = v['maintainers']
        if 'assign' in v or 'maintainers' in v:
            if 'assign' in v:
                # Copy so the += below cannot mutate botmeta's list.
                self.FILEMAP[k]['assign'] = list(v['assign'])
            if 'maintainers' in v:
                self.FILEMAP[k]['assign'] += v['maintainers']
            self.FILEMAP[k]['assign'] = sorted(set(self.FILEMAP[k]['assign']))
        if 'notify' in v or 'maintainers' in v:
            if 'notify' in v:
                # Copy so the += below cannot mutate botmeta's list.
                self.FILEMAP[k]['notify'] = list(v['notify'])
            if 'maintainers' in v:
                self.FILEMAP[k]['notify'] += v['maintainers']
            self.FILEMAP[k]['notify'] = sorted(set(self.FILEMAP[k]['notify']))
        if 'labels' in v:
            labels = v['labels']
            # Drop labels that merely restate the path components.
            labels = [x for x in labels if x not in ['lib', 'ansible']]
            self.FILEMAP[k]['labels'] = labels
def get_filemap_labels_for_files(self, files):
    '''Get expected labels from the filemap.

    Returns the union (first-seen order) of labels from every FILEMAP
    entry whose regex matches one of *files*.  An entry flagged
    non-inclusive short-circuits everything: its labels are returned
    exclusively.  ``None`` filenames are skipped.

    Fix: ``dict.iteritems()`` (Python-2-only) replaced with ``items()``,
    which behaves identically on Python 2 and also works on Python 3.
    '''
    labels = []
    exclusive = False
    for f in files:
        if f is None:
            continue
        # only one match
        if exclusive:
            continue
        for k, v in self.FILEMAP.items():
            if not v['inclusive'] and v['regex'].match(f):
                labels = v['labels']
                exclusive = True
                break
            if 'labels' not in v:
                continue
            if v['regex'].match(f):
                for label in v['labels']:
                    if label not in labels:
                        labels.append(label)
    return labels
def get_filemap_users_for_files(self, files):
    '''Get expected notifiees from the filemap.

    Returns ``(to_notify, to_assign)`` lists merged (first-seen order)
    from every FILEMAP entry whose regex matches one of *files*; a
    non-inclusive entry wins exclusively.  ``None`` filenames are skipped.

    Fixes: ``iteritems()`` -> ``items()`` (Python 2/3 compatible), and a
    latent KeyError — the guard allowed entries with only ONE of
    'notify'/'assign' through, but both keys were then indexed
    unconditionally.  ``dict.get`` with an empty default handles both.
    '''
    to_notify = []
    to_assign = []
    exclusive = False
    for f in files:
        if f is None:
            continue
        # only one match
        if exclusive:
            continue
        for k, v in self.FILEMAP.items():
            if not v['inclusive'] and v['regex'].match(f):
                to_notify = v.get('notify', [])
                to_assign = v.get('assign', [])
                exclusive = True
                break
            if 'notify' not in v and 'assign' not in v:
                continue
            if v['regex'].match(f):
                for user in v.get('notify', []):
                    if user not in to_notify:
                        to_notify.append(user)
                for user in v.get('assign', []):
                    if user not in to_assign:
                        to_assign.append(user)
    return to_notify, to_assign
def isnewdir(self, path):
    """Return True when *path* is not in the known file list."""
    return path not in self.files
def commits_by_email(self, email):
    """Return commit hashes authored by any of the given email address(es).

    Accepts a single address or a list/tuple; lazily builds an
    email -> [hashes] cache from one `git log` pass on first use.
    """
    if not isinstance(email, (list, tuple)):
        email = [email]
    if not self.email_commits:
        # One pass over the full history: "<hash> <author-email>" per line.
        cmd = 'cd {}; git log --format="%H %ae"'.format(self.checkoutdir)
        (rc, so, se) = run_command(cmd)
        # Reverse each (hash, email) pair so the email becomes the key.
        commits = [x.split(None, 1)[::-1] for x in so.split('\n') if x]
        for x in commits:
            if x[0] not in self.email_commits:
                self.email_commits[x[0]] = []
            self.email_commits[x[0]].append(x[1])
    commits = []
    for x in email:
        commits += self.email_commits.get(x, [])
    return commits
| gpl-3.0 |
evaschalde/odoo | addons/l10n_be_invoice_bba/__openerp__.py | 312 | 2621 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Belgium - Structured Communication',
'version': '1.2',
'license': 'AGPL-3',
'author': 'Noviat',
'website': 'https://www.odoo.com/page/accounting',
'category' : 'Localization',
'description': """
Belgian localization for in- and outgoing invoices (prereq to account_coda):
============================================================================
- Rename 'reference' field labels to 'Communication'
- Add support for Belgian Structured Communication
A Structured Communication can be generated automatically on outgoing invoices according to the following algorithms:
---------------------------------------------------------------------------------------------------------------------
1) Random : +++RRR/RRRR/RRRDD+++
**R..R =** Random Digits, **DD =** Check Digits
2) Date : +++DOY/YEAR/SSSDD+++
**DOY =** Day of the Year, **SSS =** Sequence Number, **DD =** Check Digits
3) Customer Reference +++RRR/RRRR/SSSDDD+++
**R..R =** Customer Reference without non-numeric characters, **SSS =** Sequence Number, **DD =** Check Digits
The preferred type of Structured Communication and associated Algorithm can be
specified on the Partner records. A 'random' Structured Communication will
generated if no algorithm is specified on the Partner record.
""",
'depends': ['account'],
'demo': [],
'data' : [
'partner_view.xml',
'account_invoice_view.xml',
],
'auto_install': False,
'installable': True,}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
abhinavp13/IITBX-edx-platform-dev | lms/djangoapps/foldit/tests.py | 2 | 12192 | import json
import logging
from functools import partial
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
from foldit.views import foldit_ops, verify_code
from foldit.models import PuzzleComplete, Score
from student.models import UserProfile, unique_id_for_user
from datetime import datetime, timedelta
from pytz import UTC
log = logging.getLogger(__name__)
class FolditTestCase(TestCase):
    """Tests for the foldit_ops endpoint: score reporting
    (SetPlayerPuzzleScores) and puzzle completion (SetPuzzlesComplete),
    including the request-verification (hash) failure paths.
    """

    def setUp(self):
        # Two users so leaderboard ordering across players can be verified.
        self.factory = RequestFactory()
        self.url = reverse('foldit_ops')

        pwd = 'abc'
        self.user = User.objects.create_user('testuser', 'test@test.com', pwd)
        self.user2 = User.objects.create_user('testuser2', 'test2@test.com', pwd)

        self.unique_user_id = unique_id_for_user(self.user)
        self.unique_user_id2 = unique_id_for_user(self.user2)

        now = datetime.now(UTC)
        self.tomorrow = now + timedelta(days=1)
        self.yesterday = now - timedelta(days=1)

        # Profiles are required by the views under test.
        UserProfile.objects.create(user=self.user)
        UserProfile.objects.create(user=self.user2)

    def make_request(self, post_data, user=None):
        """Build a POST to the foldit endpoint attributed to *user*
        (defaults to self.user)."""
        request = self.factory.post(self.url, post_data)
        request.user = self.user if not user else user
        return request

    def make_puzzle_score_request(self, puzzle_ids, best_scores, user=None):
        """
        Given lists of puzzle_ids and best_scores (must have same length), make a
        SetPlayerPuzzleScores request and return the response.
        """
        # Scalars are promoted to single-element lists for convenience.
        if not(type(best_scores) == list):
            best_scores = [best_scores]
        if not(type(puzzle_ids) == list):
            puzzle_ids = [puzzle_ids]
        user = self.user if not user else user

        def score_dict(puzzle_id, best_score):
            # Shape of one score entry as the foldit client would send it.
            return {"PuzzleID": puzzle_id,
                    "ScoreType": "score",
                    "BestScore": best_score,
                    # current scores don't actually matter
                    "CurrentScore": best_score + 0.01,
                    "ScoreVersion": 23}

        scores = [score_dict(pid, bs) for pid, bs in zip(puzzle_ids, best_scores)]
        scores_str = json.dumps(scores)

        # The endpoint authenticates payloads with a hash over the exact
        # serialized string plus the user's email.
        verify = {"Verify": verify_code(user.email, scores_str),
                  "VerifyMethod": "FoldItVerify"}
        data = {'SetPlayerPuzzleScoresVerify': json.dumps(verify),
                'SetPlayerPuzzleScores': scores_str}

        request = self.make_request(data, user)
        response = foldit_ops(request)
        self.assertEqual(response.status_code, 200)
        return response

    def test_SetPlayerPuzzleScores(self):
        puzzle_id = 994391
        best_score = 0.078034
        response = self.make_puzzle_score_request(puzzle_id, [best_score])

        self.assertEqual(response.content, json.dumps(
            [{"OperationID": "SetPlayerPuzzleScores",
              "Value": [{
                  "PuzzleID": puzzle_id,
                  "Status": "Success"}]}]))

        # There should now be a score in the db.
        top_10 = Score.get_tops_n(10, puzzle_id)
        self.assertEqual(len(top_10), 1)
        self.assertEqual(top_10[0]['score'], Score.display_score(best_score))

    def test_SetPlayerPuzzleScores_many(self):
        # Multiple puzzles in one request -> one Success entry apiece.
        response = self.make_puzzle_score_request([1, 2], [0.078034, 0.080000])

        self.assertEqual(response.content, json.dumps(
            [{
                "OperationID": "SetPlayerPuzzleScores",
                "Value": [
                    {
                        "PuzzleID": 1,
                        "Status": "Success"
                    }, {
                        "PuzzleID": 2,
                        "Status": "Success"
                    }
                ]
            }]
        ))

    def test_SetPlayerPuzzleScores_multiple(self):
        """
        Check that multiple posts with the same id are handled properly
        (keep latest for each user, have multiple users work properly)
        """
        # NOTE: lower raw scores are better; display_score transforms them.
        orig_score = 0.07
        puzzle_id = '1'
        response = self.make_puzzle_score_request([puzzle_id], [orig_score])

        # There should now be a score in the db.
        top_10 = Score.get_tops_n(10, puzzle_id)
        self.assertEqual(len(top_10), 1)
        self.assertEqual(top_10[0]['score'], Score.display_score(orig_score))

        # Reporting a better score should overwrite
        better_score = 0.06
        response = self.make_puzzle_score_request([1], [better_score])
        top_10 = Score.get_tops_n(10, puzzle_id)
        self.assertEqual(len(top_10), 1)

        # Floats always get in the way, so do almostequal
        self.assertAlmostEqual(
            top_10[0]['score'],
            Score.display_score(better_score),
            delta=0.5
        )

        # reporting a worse score shouldn't
        worse_score = 0.065
        response = self.make_puzzle_score_request([1], [worse_score])
        top_10 = Score.get_tops_n(10, puzzle_id)
        self.assertEqual(len(top_10), 1)
        # should still be the better score
        self.assertAlmostEqual(
            top_10[0]['score'],
            Score.display_score(better_score),
            delta=0.5
        )

    def test_SetPlayerPuzzleScores_manyplayers(self):
        """
        Check that when we send scores from multiple users, the correct order
        of scores is displayed. Note that, before being processed by
        display_score, lower scores are better.
        """
        puzzle_id = ['1']
        player1_score = 0.08
        player2_score = 0.02
        response1 = self.make_puzzle_score_request(
            puzzle_id, player1_score, self.user
        )

        # There should now be a score in the db.
        top_10 = Score.get_tops_n(10, puzzle_id)
        self.assertEqual(len(top_10), 1)
        self.assertEqual(top_10[0]['score'], Score.display_score(player1_score))

        response2 = self.make_puzzle_score_request(
            puzzle_id, player2_score, self.user2
        )

        # There should now be two scores in the db
        top_10 = Score.get_tops_n(10, puzzle_id)
        self.assertEqual(len(top_10), 2)

        # Top score should be player2_score. Second should be player1_score
        self.assertAlmostEqual(
            top_10[0]['score'],
            Score.display_score(player2_score),
            delta=0.5
        )
        self.assertAlmostEqual(
            top_10[1]['score'],
            Score.display_score(player1_score),
            delta=0.5
        )

        # Top score user should be self.user2.username
        self.assertEqual(top_10[0]['username'], self.user2.username)

    def test_SetPlayerPuzzleScores_error(self):
        # The verification hash is computed over the original payload;
        # mutating the payload afterwards must be rejected.
        scores = [{"PuzzleID": 994391,
                   "ScoreType": "score",
                   "BestScore": 0.078034,
                   "CurrentScore": 0.080035,
                   "ScoreVersion": 23}]
        validation_str = json.dumps(scores)

        verify = {"Verify": verify_code(self.user.email, validation_str),
                  "VerifyMethod": "FoldItVerify"}
        # change the real string -- should get an error
        scores[0]['ScoreVersion'] = 22
        scores_str = json.dumps(scores)

        data = {'SetPlayerPuzzleScoresVerify': json.dumps(verify),
                'SetPlayerPuzzleScores': scores_str}

        request = self.make_request(data)
        response = foldit_ops(request)
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(response.content,
                         json.dumps([{
                             "OperationID": "SetPlayerPuzzleScores",
                             "Success": "false",
                             "ErrorString": "Verification failed",
                             "ErrorCode": "VerifyFailed"}]))

    def make_puzzles_complete_request(self, puzzles):
        """
        Make a puzzles complete request, given an array of
        puzzles.  E.g.

        [ {"PuzzleID": 13, "Set": 1, "SubSet": 2},
          {"PuzzleID": 53524, "Set": 1, "SubSet": 1} ]
        """
        puzzles_str = json.dumps(puzzles)
        verify = {"Verify": verify_code(self.user.email, puzzles_str),
                  "VerifyMethod": "FoldItVerify"}
        data = {'SetPuzzlesCompleteVerify': json.dumps(verify),
                'SetPuzzlesComplete': puzzles_str}

        request = self.make_request(data)
        response = foldit_ops(request)
        self.assertEqual(response.status_code, 200)
        return response

    @staticmethod
    def set_puzzle_complete_response(values):
        # Expected response body for a successful SetPuzzlesComplete call.
        return json.dumps([{"OperationID":"SetPuzzlesComplete",
                            "Value": values}])

    def test_SetPlayerPuzzlesComplete(self):
        puzzles = [ {"PuzzleID": 13, "Set": 1, "SubSet": 2},
                    {"PuzzleID": 53524, "Set": 1, "SubSet": 1} ]

        response = self.make_puzzles_complete_request(puzzles)

        self.assertEqual(response.content,
                         self.set_puzzle_complete_response([13, 53524]))

    def test_SetPlayerPuzzlesComplete_multiple(self):
        """Check that state is stored properly"""
        # Completions accumulate across requests; the response lists ALL
        # completed puzzle ids, sorted.
        puzzles = [ {"PuzzleID": 13, "Set": 1, "SubSet": 2},
                    {"PuzzleID": 53524, "Set": 1, "SubSet": 1} ]

        response = self.make_puzzles_complete_request(puzzles)

        self.assertEqual(response.content,
                         self.set_puzzle_complete_response([13, 53524]))

        puzzles = [ {"PuzzleID": 14, "Set": 1, "SubSet": 3},
                    {"PuzzleID": 15, "Set": 1, "SubSet": 1} ]

        response = self.make_puzzles_complete_request(puzzles)

        self.assertEqual(response.content,
                         self.set_puzzle_complete_response([13, 14, 15, 53524]))

    def test_SetPlayerPuzzlesComplete_level_complete(self):
        """Check that the level complete function works"""
        puzzles = [ {"PuzzleID": 13, "Set": 1, "SubSet": 2},
                    {"PuzzleID": 53524, "Set": 1, "SubSet": 1} ]

        response = self.make_puzzles_complete_request(puzzles)

        self.assertEqual(response.content,
                         self.set_puzzle_complete_response([13, 53524]))

        puzzles = [ {"PuzzleID": 14, "Set": 1, "SubSet": 3},
                    {"PuzzleID": 15, "Set": 1, "SubSet": 1} ]

        response = self.make_puzzles_complete_request(puzzles)

        self.assertEqual(response.content,
                         self.set_puzzle_complete_response([13, 14, 15, 53524]))

        is_complete = partial(
            PuzzleComplete.is_level_complete, self.unique_user_id)

        self.assertTrue(is_complete(1, 1))
        self.assertTrue(is_complete(1, 3))
        self.assertTrue(is_complete(1, 2))
        self.assertFalse(is_complete(4, 5))

        puzzles = [ {"PuzzleID": 74, "Set": 4, "SubSet": 5} ]
        response = self.make_puzzles_complete_request(puzzles)

        self.assertTrue(is_complete(4, 5))

        # Now check due dates
        self.assertTrue(is_complete(1, 1, due=self.tomorrow))
        self.assertFalse(is_complete(1, 1, due=self.yesterday))

    def test_SetPlayerPuzzlesComplete_error(self):
        # Corrupt the verification hash (computed over payload + "x"):
        # the endpoint must reject the request.
        puzzles = [ {"PuzzleID": 13, "Set": 1, "SubSet": 2},
                    {"PuzzleID": 53524, "Set": 1, "SubSet": 1} ]

        puzzles_str = json.dumps(puzzles)
        verify = {"Verify": verify_code(self.user.email, puzzles_str + "x"),
                  "VerifyMethod":"FoldItVerify"}
        data = {'SetPuzzlesCompleteVerify': json.dumps(verify),
                'SetPuzzlesComplete': puzzles_str}

        request = self.make_request(data)
        response = foldit_ops(request)
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(response.content,
                         json.dumps([{
                             "OperationID": "SetPuzzlesComplete",
                             "Success": "false",
                             "ErrorString": "Verification failed",
                             "ErrorCode": "VerifyFailed"}]))
| agpl-3.0 |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/numpy/lib/polynomial.py | 82 | 37957 | """
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
# Subclasses UserWarning so callers can filter it with the warnings module.
class RankWarning(UserWarning):
    """
    Issued by `polyfit` when the Vandermonde matrix is rank deficient.

    For more information, a way to suppress the warning, and an example of
    `RankWarning` being issued, see `polyfit`.

    """
    pass
def poly(seq_of_zeros):
    """
    Find the coefficients of a polynomial with the given sequence of roots.

    Returns the coefficients of the polynomial whose leading coefficient
    is one for the given sequence of zeros (multiple roots must be included
    in the sequence as many times as their multiplicity; see Examples).
    A square matrix (or array, which will be treated as a matrix) can also
    be given, in which case the coefficients of the characteristic polynomial
    of the matrix are returned.

    Parameters
    ----------
    seq_of_zeros : array_like, shape (N,) or (N, N)
        A sequence of polynomial roots, or a square array or matrix object.

    Returns
    -------
    c : ndarray
        1D array of polynomial coefficients from highest to lowest degree:

        ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
        where c[0] always equals 1.

    Raises
    ------
    ValueError
        If input is the wrong shape (the input must be a 1-D or square
        2-D array).

    See Also
    --------
    polyval : Evaluate a polynomial at a point.
    roots : Return the roots of a polynomial.
    polyfit : Least squares polynomial fit.
    poly1d : A one-dimensional polynomial class.

    Notes
    -----
    Specifying the roots of a polynomial still leaves one degree of
    freedom, typically represented by an undetermined leading
    coefficient. [1]_ In the case of this function, that coefficient -
    the first one in the returned array - is always taken as one. (If
    for some reason you have one other point, the only automatic way
    presently to leverage that information is to use ``polyfit``.)

    The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
    matrix **A** is given by

        :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,

    where **I** is the `n`-by-`n` identity matrix. [2]_

    References
    ----------
    .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry,
       Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.

    .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
       Academic Press, pg. 182, 1980.

    Examples
    --------
    Given a sequence of a polynomial's zeros:

    >>> np.poly((0, 0, 0)) # Multiple root example
    array([1, 0, 0, 0])

    The line above represents z**3 + 0*z**2 + 0*z + 0.

    >>> np.poly((-1./2, 0, 1./2))
    array([ 1.  ,  0.  , -0.25,  0.  ])

    The line above represents z**3 - z/4

    >>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
    array([ 1.        , -0.77086955,  0.08618131,  0.        ]) #random

    Given a square array object:

    >>> P = np.array([[0, 1./3], [-1./2, 0]])
    >>> np.poly(P)
    array([ 1.        ,  0.        ,  0.16666667])

    Or a square matrix object:

    >>> np.poly(np.matrix(P))
    array([ 1.        ,  0.        ,  0.16666667])

    Note how in all cases the leading coefficient is always 1.

    """
    seq_of_zeros = atleast_1d(seq_of_zeros)
    sh = seq_of_zeros.shape
    # A non-empty square 2-D input is treated as a matrix: its eigenvalues
    # become the roots of the characteristic polynomial.
    if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
        seq_of_zeros = eigvals(seq_of_zeros)
    elif len(sh) == 1:
        dt = seq_of_zeros.dtype
        # Let object arrays slip through, e.g. for arbitrary precision
        if dt != object:
            seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
    else:
        raise ValueError("input must be 1d or non-empty square 2d array.")

    if len(seq_of_zeros) == 0:
        return 1.0
    dt = seq_of_zeros.dtype
    a = ones((1,), dtype=dt)
    # Multiply out the (x - r_k) factors one root at a time; polynomial
    # multiplication is convolution of the coefficient arrays.
    for k in range(len(seq_of_zeros)):
        a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
                        mode='full')

    if issubclass(a.dtype.type, NX.complexfloating):
        # if complex roots are all complex conjugates, the roots are real.
        roots = NX.asarray(seq_of_zeros, complex)
        pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
        neg_roots = NX.conjugate(sort_complex(
            NX.compress(roots.imag < 0, roots)))
        if (len(pos_roots) == len(neg_roots) and
                NX.alltrue(neg_roots == pos_roots)):
            a = a.real.copy()

    return a
def roots(p):
    """
    Return the roots of a polynomial with coefficients given in p.

    The values in the rank-1 array `p` are coefficients of a polynomial.
    If the length of `p` is n+1 then the polynomial is described by::

      p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]

    Parameters
    ----------
    p : array_like
        Rank-1 array of polynomial coefficients.

    Returns
    -------
    out : ndarray
        An array containing the complex roots of the polynomial.

    Raises
    ------
    ValueError
        When `p` cannot be converted to a rank-1 array.

    See also
    --------
    poly : Find the coefficients of a polynomial with a given sequence
           of roots.
    polyval : Evaluate a polynomial at a point.
    polyfit : Least squares polynomial fit.
    poly1d : A one-dimensional polynomial class.

    Notes
    -----
    The algorithm relies on computing the eigenvalues of the
    companion matrix [1]_.

    References
    ----------
    .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*.  Cambridge, UK:
        Cambridge University Press, 1999, pp. 146-7.

    Examples
    --------
    >>> coeff = [3.2, 2, 1]
    >>> np.roots(coeff)
    array([-0.3125+0.46351241j, -0.3125-0.46351241j])

    """
    # If input is scalar, this makes it an array
    p = atleast_1d(p)
    if len(p.shape) != 1:
        raise ValueError("Input must be a rank-1 array.")

    # find non-zero array entries
    non_zero = NX.nonzero(NX.ravel(p))[0]

    # Return an empty array if polynomial is all zeros
    if len(non_zero) == 0:
        return NX.array([])

    # find the number of trailing zeros -- this is the number of roots at 0.
    trailing_zeros = len(p) - non_zero[-1] - 1

    # strip leading and trailing zeros
    p = p[int(non_zero[0]):int(non_zero[-1])+1]

    # casting: if incoming array isn't floating point, make it floating point.
    if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
        p = p.astype(float)

    N = len(p)
    if N > 1:
        # build companion matrix and find its eigenvalues (the roots)
        A = diag(NX.ones((N-2,), p.dtype), -1)
        A[0,:] = -p[1:] / p[0]
        roots = eigvals(A)
    else:
        # Degree-0 (constant) polynomial after stripping: no finite roots.
        roots = NX.array([])

    # tack any zeros onto the back of the array
    roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
    return roots
def polyint(p, m=1, k=None):
    """
    Return an antiderivative (indefinite integral) of a polynomial.

    The returned order `m` antiderivative `P` of polynomial `p` satisfies
    :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
    integration constants `k`. The constants determine the low-order
    polynomial part

    .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}

    of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.

    Parameters
    ----------
    p : array_like or poly1d
        Polynomial to differentiate.
        A sequence is interpreted as polynomial coefficients, see `poly1d`.
    m : int, optional
        Order of the antiderivative. (Default: 1)
    k : list of `m` scalars or scalar, optional
        Integration constants. They are given in the order of integration:
        those corresponding to highest-order terms come first.

        If ``None`` (default), all constants are assumed to be zero.
        If `m = 1`, a single scalar can be given instead of a list.

    See Also
    --------
    polyder : derivative of a polynomial
    poly1d.integ : equivalent method

    Examples
    --------
    The defining property of the antiderivative:

    >>> p = np.poly1d([1,1,1])
    >>> P = np.polyint(p)
    >>> P
    poly1d([ 0.33333333,  0.5       ,  1.        ,  0.        ])
    >>> np.polyder(P) == p
    True

    The integration constants default to zero, but can be specified:

    >>> P = np.polyint(p, 3)
    >>> P(0)
    0.0
    >>> np.polyder(P)(0)
    0.0
    >>> np.polyder(P, 2)(0)
    0.0
    >>> P = np.polyint(p, 3, k=[6,5,3])
    >>> P
    poly1d([ 0.01666667,  0.04166667,  0.16666667,  3.        ,  5.        ,  3.        ])

    Note that 3 = 6 / 2!, and that the constants are given in the order of
    integrations. Constant of the highest-order polynomial term comes first:

    >>> np.polyder(P, 2)(0)
    6.0
    >>> np.polyder(P, 1)(0)
    5.0
    >>> P(0)
    3.0

    """
    m = int(m)
    if m < 0:
        raise ValueError("Order of integral must be positive (see polyder)")
    if k is None:
        k = NX.zeros(m, float)
    k = atleast_1d(k)
    # A single scalar constant is broadcast across all m integrations.
    if len(k) == 1 and m > 1:
        k = k[0]*NX.ones(m, float)
    if len(k) < m:
        raise ValueError(
              "k must be a scalar or a rank-1 array of length 1 or >m.")

    # Remember whether the input was a poly1d so the same type is returned.
    truepoly = isinstance(p, poly1d)
    p = NX.asarray(p)
    if m == 0:
        if truepoly:
            return poly1d(p)
        return p
    else:
        # Integrate once (divide each coefficient by its new exponent,
        # append the constant), then recurse for the remaining m-1 orders.
        # Note: this must work also with object and integer arrays
        y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
        val = polyint(y, m - 1, k=k[1:])
        if truepoly:
            return poly1d(val)
        return val
def polyder(p, m=1):
    """
    Return the derivative of the specified order of a polynomial.

    Parameters
    ----------
    p : poly1d or sequence
        Polynomial to differentiate.
        A sequence is interpreted as polynomial coefficients, see `poly1d`.
    m : int, optional
        Order of differentiation (default: 1)

    Returns
    -------
    der : poly1d
        A new polynomial representing the derivative.

    See Also
    --------
    polyint : Anti-derivative of a polynomial.
    poly1d : Class for one-dimensional polynomials.

    Examples
    --------
    The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:

    >>> p = np.poly1d([1,1,1,1])
    >>> p2 = np.polyder(p)
    >>> p2
    poly1d([3, 2, 1])

    which evaluates to:

    >>> p2(2.)
    17.0

    We can verify this, approximating the derivative with
    ``(f(x + h) - f(x))/h``:

    >>> (p(2. + 0.001) - p(2.)) / 0.001
    17.007000999997857

    The fourth-order derivative of a 3rd-order polynomial is zero:

    >>> np.polyder(p, 2)
    poly1d([6, 2])
    >>> np.polyder(p, 3)
    poly1d([6])
    >>> np.polyder(p, 4)
    poly1d([ 0.])

    """
    m = int(m)
    if m < 0:
        raise ValueError("Order of derivative must be positive (see polyint)")

    # Remember whether the input was a poly1d so the same type is returned.
    truepoly = isinstance(p, poly1d)
    p = NX.asarray(p)
    n = len(p) - 1
    # One differentiation step: multiply each coefficient by its exponent
    # and drop the constant term.
    y = p[:-1] * NX.arange(n, 0, -1)
    if m == 0:
        val = p
    else:
        # Recurse for the remaining m-1 orders.
        val = polyder(y, m - 1)
    if truepoly:
        val = poly1d(val)
    return val
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    """
    Least squares polynomial fit.

    Fit ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` to the
    points ``(x[i], y[i])``, minimising the squared error.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points.
    y : array_like, shape (M,) or (M, K)
        y-coordinates; a 2-D array fits one dataset per column at once.
    deg : int
        Degree of the fitting polynomial.
    rcond : float, optional
        Relative cutoff for small singular values of the design matrix.
        Defaults to ``len(x) * eps`` for the data's float type.
    full : bool, optional
        If True, also return the diagnostic output of the SVD solve.
    w : array_like, shape (M,), optional
        Weights to apply to the y-coordinates.
    cov : bool, optional
        If True (and `full` is False), also return the covariance matrix
        of the coefficient estimates.

    Returns
    -------
    p : ndarray, shape (deg + 1,) or (deg + 1, K)
        Polynomial coefficients, highest power first.
    residuals, rank, singular_values, rcond
        Present only if `full` is True; see `linalg.lstsq`.
    V : ndarray
        Present only if `full` is False and `cov` is True; covariance
        matrix of the coefficient estimates.

    Warns
    -----
    RankWarning
        When the coefficient matrix is rank deficient and `full` is False.

    See Also
    --------
    polyval, linalg.lstsq, scipy.interpolate.UnivariateSpline
    """
    order = int(deg) + 1
    # "+ 0.0" promotes integer input to float without copying float input twice.
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0

    # Validate the arguments before doing any numerical work.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")

    if rcond is None:
        # Default cutoff scales with problem size and float precision.
        rcond = len(x)*finfo(x.dtype).eps

    # Least-squares system: Vandermonde design matrix against the samples.
    design = vander(x, order)
    observed = y

    # Optional per-sample weighting of both sides of the system.
    if w is not None:
        w = NX.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        design *= w[:, NX.newaxis]
        if observed.ndim == 2:
            observed *= w[:, NX.newaxis]
        else:
            observed *= w

    # Normalise each column to improve the condition number, solve, then
    # undo the scaling on the returned coefficients.
    col_norm = NX.sqrt((design*design).sum(axis=0))
    design /= col_norm
    coef, resids, rank, sing_vals = lstsq(design, observed, rcond)
    coef = (coef.T/col_norm).T

    # Rank reduction means the fit is numerically ill-defined; warn unless
    # the caller asked for full diagnostics and can inspect `rank` directly.
    if rank != order and not full:
        msg = "Polyfit may be poorly conditioned"
        warnings.warn(msg, RankWarning)

    if full:
        return coef, resids, rank, sing_vals, rcond
    elif cov:
        covar = inv(dot(design.T, design))
        covar /= NX.outer(col_norm, col_norm)
        # Some literature ignores the extra -2.0 factor in the denominator,
        # but it is included here because the covariance of a multivariate
        # Student-T (implied by a Bayesian uncertainty analysis) includes it,
        # giving a slightly more conservative estimate of the uncertainty.
        dof_factor = resids / (len(x) - order - 2.0)
        if y.ndim == 1:
            return coef, covar * dof_factor
        else:
            return coef, covar[:,:, NX.newaxis] * dof_factor
    else:
        return coef
def polyval(p, x):
    """
    Evaluate a polynomial at specific values.

    For coefficients `p` of length N this computes
    ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
    using Horner's scheme.  `x` may be a number, an array (evaluated
    element-wise) or a poly1d (yielding the composition ``p(x(t))``).

    Parameters
    ----------
    p : array_like or poly1d object
        1D array of polynomial coefficients from highest degree to the
        constant term, or a poly1d instance.
    x : array_like or poly1d object
        Value(s) at which to evaluate `p`.

    Returns
    -------
    values : ndarray or poly1d
        `x` array_like => array_like result; `x` poly1d => poly1d result.

    Notes
    -----
    For polynomials of high degree the values may be inaccurate due to
    rounding errors; use carefully.

    Examples
    --------
    >>> np.polyval([3,0,1], 5)  # 3 * 5**2 + 0 * 5**1 + 1
    76
    """
    coeffs = NX.asarray(p)
    if isinstance(x, poly1d):
        # Composition: start from the zero polynomial (plain 0 promotes via
        # poly1d arithmetic inside the loop).
        acc = 0
    else:
        x = NX.asarray(x)
        acc = NX.zeros_like(x)
    # Horner's rule over the coefficients, highest power first.
    for coef in coeffs:
        acc = acc * x + coef
    return acc
def polyadd(a1, a2):
    """
    Find the sum of two polynomials.

    Each input is either a poly1d object or a 1D sequence of coefficients
    from highest to lowest degree.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        Coefficients of the sum, highest degree first; a poly1d object when
        either input was a poly1d.

    See Also
    --------
    poly1d, polysub, polymul, polydiv, polyval

    Examples
    --------
    >>> np.polyadd([1, 2], [9, 5, 4])
    array([9, 6, 6])
    """
    wrap_result = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    pad = len(a1) - len(a2)
    # Left-pad the shorter coefficient vector with zeros so degrees line up.
    if pad == 0:
        total = a1 + a2
    elif pad < 0:
        total = NX.concatenate((NX.zeros(-pad, a1.dtype), a1)) + a2
    else:
        total = a1 + NX.concatenate((NX.zeros(pad, a2.dtype), a2))
    if wrap_result:
        total = poly1d(total)
    return total
def polysub(a1, a2):
    """
    Difference (subtraction) of two polynomials.

    Given two polynomials `a1` and `a2`, returns ``a1 - a2``.  Inputs are
    coefficient sequences (highest degree first) or poly1d objects.

    Parameters
    ----------
    a1, a2 : array_like or poly1d
        Minuend and subtrahend polynomials, respectively.

    Returns
    -------
    out : ndarray or poly1d
        Coefficients of the difference; a poly1d when either input was one.

    See Also
    --------
    polyval, polydiv, polymul, polyadd

    Examples
    --------
    >>> np.polysub([2, 10, -2], [3, 10, -4])
    array([-1,  0,  2])
    """
    wrap_result = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    pad = len(a1) - len(a2)
    # Left-pad the shorter coefficient vector with zeros, then subtract.
    if pad == 0:
        diff = a1 - a2
    elif pad < 0:
        diff = NX.concatenate((NX.zeros(-pad, a1.dtype), a1)) - a2
    else:
        diff = a1 - NX.concatenate((NX.zeros(pad, a2.dtype), a2))
    if wrap_result:
        diff = poly1d(diff)
    return diff
def polymul(a1, a2):
    """
    Find the product of two polynomials.

    Each input is either a poly1d object or a 1D sequence of coefficients
    from highest to lowest degree.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        Coefficients of the product, highest degree first; a poly1d when
        either input was a poly1d.

    See Also
    --------
    poly1d, polyadd, polysub, polydiv
    convolve : Array convolution.  Same output as polymul, but has a
        parameter for overlap mode.

    Examples
    --------
    >>> np.polymul([1, 2, 3], [9, 5, 1])
    array([ 9, 23, 38, 17,  3])
    """
    wrap_result = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    # Polynomial multiplication is discrete convolution of the coefficients.
    product = NX.convolve(poly1d(a1), poly1d(a2))
    if wrap_result:
        return poly1d(product)
    return product
def polydiv(u, v):
    """
    Returns the quotient and remainder of polynomial division.

    The input arrays are the coefficients (including any coefficients
    equal to zero) of the "numerator" (dividend) and "denominator"
    (divisor) polynomials, respectively.

    Parameters
    ----------
    u : array_like or poly1d
        Dividend polynomial's coefficients.
    v : array_like or poly1d
        Divisor polynomial's coefficients.

    Returns
    -------
    q : ndarray or poly1d
        Coefficients, including those equal to zero, of the quotient.
    r : ndarray or poly1d
        Coefficients, including those equal to zero, of the remainder.

    See Also
    --------
    poly, polyadd, polyder, polyfit, polyint, polymul, polysub, polyval

    Notes
    -----
    Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
    not equal `v.ndim`.

    Examples
    --------
    .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25

    >>> x = np.array([3.0, 5.0, 2.0])
    >>> y = np.array([2.0, 1.0])
    >>> np.polydiv(x, y)
    (array([ 1.5 ,  1.75]), array([ 0.25]))
    """
    # BUG FIX: the original tested isinstance(u, poly1d) twice, so passing a
    # poly1d only as the divisor never produced poly1d results.
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
    u = atleast_1d(u) + 0.0
    v = atleast_1d(v) + 0.0
    # w has the common type of the two leading coefficients.
    w = u[0] + v[0]
    m = len(u) - 1
    n = len(v) - 1
    scale = 1. / v[0]
    q = NX.zeros((max(m - n + 1, 1),), w.dtype)
    r = u.copy()
    # Classic synthetic long division: peel off one quotient term per step.
    for k in range(0, m-n+1):
        d = scale * r[k]
        q[k] = d
        r[k:k+n+1] -= d*v
    # Trim (numerically) zero leading coefficients from the remainder,
    # keeping at least one term.
    while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
        r = r[1:]
    if truepoly:
        return poly1d(q), poly1d(r)
    return q, r
# Matches "**<digits>" exponent markers in a rendered polynomial string;
# group 1 captures the digits so they can be moved to a superscript line.
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
    """Typeset the exponents of *astr* on a line above their terms.

    Scans each "**N" marker and builds two parallel strings: ``line1``
    collects the exponents (space-padded into column position) and
    ``line2`` the base expression with the markers removed.  When either
    line would exceed *wrap* columns, the pair is flushed to the output
    and a new pair is started.
    """
    n = 0          # current scan position within astr
    line1 = ''     # superscript (exponent) line under construction
    line2 = ''     # base (term) line under construction
    output = ' '
    while True:
        mat = _poly_mat.search(astr, n)
        if mat is None:
            break
        span = mat.span()
        power = mat.groups()[0]
        partstr = astr[n:span[0]]
        n = span[1]
        # Pad so the exponent lands directly above the end of its term.
        toadd2 = partstr + ' '*(len(power)-1)
        toadd1 = ' '*(len(partstr)-1) + power
        if ((len(line2) + len(toadd2) > wrap) or
                (len(line1) + len(toadd1) > wrap)):
            # Wrap: emit the finished pair and restart with this term.
            output += line1 + "\n" + line2 + "\n "
            line1 = toadd1
            line2 = toadd2
        else:
            line2 += partstr + ' '*(len(power)-1)
            line1 += ' '*(len(partstr)-1) + power
    output += line1 + "\n" + line2
    # Append whatever follows the last "**N" marker (e.g. "+ constant").
    return output + astr[n:]
class poly1d(object):
    """
    A one-dimensional polynomial class.
    A convenience class, used to encapsulate "natural" operations on
    polynomials so that said operations may take on their customary
    form in code (see Examples).
    Parameters
    ----------
    c_or_r : array_like
        The polynomial's coefficients, in decreasing powers, or if
        the value of the second parameter is True, the polynomial's
        roots (values where the polynomial evaluates to 0).  For example,
        ``poly1d([1, 2, 3])`` returns an object that represents
        :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
        one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
    r : bool, optional
        If True, `c_or_r` specifies the polynomial's roots; the default
        is False.
    variable : str, optional
        Changes the variable used when printing `p` from `x` to `variable`
        (see Examples).
    Examples
    --------
    Construct the polynomial :math:`x^2 + 2x + 3`:
    >>> p = np.poly1d([1, 2, 3])
    >>> print np.poly1d(p)
       2
    1 x + 2 x + 3
    Evaluate the polynomial at :math:`x = 0.5`:
    >>> p(0.5)
    4.25
    Find the roots:
    >>> p.r
    array([-1.+1.41421356j, -1.-1.41421356j])
    >>> p(p.r)
    array([ -4.44089210e-16+0.j,  -4.44089210e-16+0.j])
    These numbers in the previous line represent (0, 0) to machine precision
    Show the coefficients:
    >>> p.c
    array([1, 2, 3])
    Display the order (the leading zero-coefficients are removed):
    >>> p.order
    2
    Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(i+1)]``):
    >>> p[1]
    2
    Polynomials can be added, subtracted, multiplied, and divided
    (returns quotient and remainder):
    >>> p * p
    poly1d([ 1,  4, 10, 12,  9])
    >>> (p**3 + 4) / p
    (poly1d([  1.,   4.,  10.,  12.,   9.]), poly1d([ 4.]))
    ``asarray(p)`` gives the coefficient array, so polynomials can be
    used in all functions that accept arrays:
    >>> p**2 # square of polynomial
    poly1d([ 1,  4, 10, 12,  9])
    >>> np.square(p) # square of individual coefficients
    array([1, 4, 9])
    The variable used in the string representation of `p` can be modified,
    using the `variable` parameter:
    >>> p = np.poly1d([1,2,3], variable='z')
    >>> print p
       2
    1 z + 2 z + 3
    Construct a polynomial from its roots:
    >>> np.poly1d([1, 2], True)
    poly1d([ 1, -3,  2])
    This is the same polynomial as obtained by:
    >>> np.poly1d([1, -1]) * np.poly1d([1, -2])
    poly1d([ 1, -3,  2])
    """
    # Class-level placeholders only; real values are written straight into
    # the instance __dict__ because __setattr__ below forbids ordinary
    # attribute assignment.
    coeffs = None    # ndarray of coefficients, highest power first
    order = None     # polynomial degree == len(coeffs) - 1
    variable = None  # symbol used by __str__ when rendering
    # Instances are mutable (see __setitem__) and define __eq__, so they
    # are explicitly unhashable.
    __hash__ = None
    def __init__(self, c_or_r, r=0, variable=None):
        # Copy construction: replicate the other poly1d's state wholesale,
        # optionally overriding the display variable.
        if isinstance(c_or_r, poly1d):
            for key in c_or_r.__dict__.keys():
                self.__dict__[key] = c_or_r.__dict__[key]
            if variable is not None:
                self.__dict__['variable'] = variable
            return
        if r:
            # c_or_r holds roots: convert to coefficients via poly().
            c_or_r = poly(c_or_r)
        c_or_r = atleast_1d(c_or_r)
        if len(c_or_r.shape) > 1:
            raise ValueError("Polynomial must be 1d only.")
        # Drop leading ('f'ront) zero coefficients; keep at least one term.
        c_or_r = trim_zeros(c_or_r, trim='f')
        if len(c_or_r) == 0:
            c_or_r = NX.array([0.])
        self.__dict__['coeffs'] = c_or_r
        self.__dict__['order'] = len(c_or_r) - 1
        if variable is None:
            variable = 'x'
        self.__dict__['variable'] = variable
    def __array__(self, t=None):
        # Expose the coefficient array to NumPy, optionally cast to dtype t.
        if t:
            return NX.asarray(self.coeffs, t)
        else:
            return NX.asarray(self.coeffs)
    def __repr__(self):
        vals = repr(self.coeffs)
        # Strip the "array(" prefix and ")" suffix from the ndarray repr.
        vals = vals[6:-1]
        return "poly1d(%s)" % vals
    def __len__(self):
        # NOTE: length is the *degree*, not the number of coefficients.
        return self.order
    def __str__(self):
        thestr = "0"
        var = self.variable
        # Remove leading zeros
        coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
        N = len(coeffs)-1
        def fmt_float(q):
            # Render a float compactly, dropping a redundant ".0000" tail.
            s = '%.4g' % q
            if s.endswith('.0000'):
                s = s[:-5]
            return s
        for k in range(len(coeffs)):
            # Format the coefficient: real, purely imaginary, or complex.
            if not iscomplex(coeffs[k]):
                coefstr = fmt_float(real(coeffs[k]))
            elif real(coeffs[k]) == 0:
                coefstr = '%sj' % fmt_float(imag(coeffs[k]))
            else:
                coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
                                          fmt_float(imag(coeffs[k])))
            power = (N-k)
            if power == 0:
                if coefstr != '0':
                    newstr = '%s' % (coefstr,)
                else:
                    if k == 0:
                        newstr = '0'
                    else:
                        newstr = ''
            elif power == 1:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    # NOTE(review): fmt_float never appears to return 'b';
                    # this branch looks like historical dead code -- confirm
                    # before removing.
                    newstr = var
                else:
                    newstr = '%s %s' % (coefstr, var)
            else:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    # NOTE(review): same apparently-dead 'b' sentinel as above.
                    newstr = '%s**%d' % (var, power,)
                else:
                    newstr = '%s %s**%d' % (coefstr, var, power)
            # Join terms with " + " / " - ", folding the sign of the term.
            if k > 0:
                if newstr != '':
                    if newstr.startswith('-'):
                        thestr = "%s - %s" % (thestr, newstr[1:])
                    else:
                        thestr = "%s + %s" % (thestr, newstr)
            else:
                thestr = newstr
        # Lift the "**N" exponents onto a superscript line.
        return _raise_power(thestr)
    def __call__(self, val):
        # p(val): evaluate (or compose, when val is a poly1d).
        return polyval(self.coeffs, val)
    def __neg__(self):
        return poly1d(-self.coeffs)
    def __pos__(self):
        return self
    def __mul__(self, other):
        # Scalar multiply scales coefficients; otherwise full polynomial
        # multiplication via polymul.
        if isscalar(other):
            return poly1d(self.coeffs * other)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))
    def __rmul__(self, other):
        if isscalar(other):
            return poly1d(other * self.coeffs)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))
    def __add__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))
    def __radd__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))
    def __pow__(self, val):
        # Only non-negative integer powers make sense for polynomials.
        if not isscalar(val) or int(val) != val or val < 0:
            raise ValueError("Power to non-negative integers only.")
        res = [1]
        # Repeated multiplication; val == 0 yields the constant poly [1].
        for _ in range(val):
            res = polymul(self.coeffs, res)
        return poly1d(res)
    def __sub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(self.coeffs, other.coeffs))
    def __rsub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(other.coeffs, self.coeffs))
    def __div__(self, other):
        # Scalar divide scales coefficients; otherwise polynomial division
        # returning a (quotient, remainder) pair.
        if isscalar(other):
            return poly1d(self.coeffs/other)
        else:
            other = poly1d(other)
            return polydiv(self, other)
    # Python 3 style division delegates to the same implementation.
    __truediv__ = __div__
    def __rdiv__(self, other):
        if isscalar(other):
            return poly1d(other/self.coeffs)
        else:
            other = poly1d(other)
            return polydiv(other, self)
    __rtruediv__ = __rdiv__
    def __eq__(self, other):
        # NOTE(review): assumes `other` is a poly1d; comparing against an
        # arbitrary object raises AttributeError instead of returning
        # NotImplemented -- confirm callers never rely on mixed comparison.
        if self.coeffs.shape != other.coeffs.shape:
            return False
        return (self.coeffs == other.coeffs).all()
    def __ne__(self, other):
        return not self.__eq__(other)
    def __setattr__(self, key, val):
        # Attributes are intentionally read-only; state changes go through
        # __setitem__ (which writes to __dict__ directly).
        raise ValueError("Attributes cannot be changed this way.")
    def __getattr__(self, key):
        # Convenience aliases: r/roots, c/coef/coefficients, o (order).
        if key in ['r', 'roots']:
            return roots(self.coeffs)
        elif key in ['c', 'coef', 'coefficients']:
            return self.coeffs
        elif key in ['o']:
            return self.order
        else:
            try:
                return self.__dict__[key]
            except KeyError:
                raise AttributeError(
                    "'%s' has no attribute '%s'" % (self.__class__, key))
    def __getitem__(self, val):
        # p[k] is the coefficient of x**k; out-of-range powers read as 0.
        ind = self.order - val
        if val > self.order:
            return 0
        if val < 0:
            return 0
        return self.coeffs[ind]
    def __setitem__(self, key, val):
        # p[k] = v sets the coefficient of x**k, growing the coefficient
        # array (zero-padded) when k exceeds the current order.
        ind = self.order - key
        if key < 0:
            raise ValueError("Does not support negative powers.")
        if key > self.order:
            zr = NX.zeros(key-self.order, self.coeffs.dtype)
            self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
            self.__dict__['order'] = key
            ind = 0
        self.__dict__['coeffs'][ind] = val
        return
    def __iter__(self):
        # Iterates coefficients from highest power to the constant term.
        return iter(self.coeffs)
    def integ(self, m=1, k=0):
        """
        Return an antiderivative (indefinite integral) of this polynomial.
        Refer to `polyint` for full documentation.
        See Also
        --------
        polyint : equivalent function
        """
        return poly1d(polyint(self.coeffs, m=m, k=k))
    def deriv(self, m=1):
        """
        Return a derivative of this polynomial.
        Refer to `polyder` for full documentation.
        See Also
        --------
        polyder : equivalent function
        """
        return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
# Always surface RankWarning (raised by polyfit on ill-conditioned fits),
# overriding Python's default once-per-location warning filtering so that
# repeated bad fits are never silently swallowed.
warnings.simplefilter('always', RankWarning)
| artistic-2.0 |
sallaire/Sick-Beard | lib/jsonrpclib/jsonclass.py | 87 | 5116 | import types
import inspect
import re
import traceback
from lib.jsonrpclib import config
# Builtin container types that dump()/load() handle by recursing into their
# elements (Python 2 `types` names).
iter_types = [
    types.DictType,
    types.ListType,
    types.TupleType
]
# Builtin text types passed through unchanged.
string_types = [
    types.StringType,
    types.UnicodeType
]
# Builtin numeric types passed through unchanged.
numeric_types = [
    types.IntType,
    types.LongType,
    types.FloatType
]
# Remaining scalar builtins (bool and None) passed through unchanged.
value_types = [
    types.BooleanType,
    types.NoneType
]
# Everything representable without a __jsonclass__ wrapper.
supported_types = iter_types+string_types+numeric_types+value_types
# Characters NOT allowed in a dotted module path; used by load() to reject
# unsafe names before handing them to __import__.
invalid_module_chars = r'[^a-zA-Z0-9\_\.]'
class TranslationError(Exception):
    """Raised when a value cannot be translated to or from its
    __jsonclass__ representation."""
    pass
def dump(obj, serialize_method=None, ignore_attribute=None, ignore=[]):
    """Serialize *obj* into its JSON-RPC ``__jsonclass__`` form.

    Builtin scalars pass through unchanged; builtin containers are
    serialized recursively.  Any other object becomes a dict whose
    "__jsonclass__" entry carries the (qualified) class name plus
    constructor params, alongside its serializable attributes.
    """
    # NOTE(review): the mutable default ``ignore=[]`` is shared across calls;
    # it is never mutated below, so this is safe only while that holds.
    if not serialize_method:
        serialize_method = config.serialize_method
    if not ignore_attribute:
        ignore_attribute = config.ignore_attribute
    obj_type = type(obj)
    # Parse / return default "types"...
    if obj_type in numeric_types+string_types+value_types:
        return obj
    if obj_type in iter_types:
        # Lists and tuples: serialize each element, preserving tuple-ness.
        if obj_type in (types.ListType, types.TupleType):
            new_obj = []
            for item in obj:
                new_obj.append(dump(item, serialize_method,
                                    ignore_attribute, ignore))
            if obj_type is types.TupleType:
                new_obj = tuple(new_obj)
            return new_obj
        # It's a dict...
        else:
            new_obj = {}
            for key, value in obj.iteritems():
                new_obj[key] = dump(value, serialize_method,
                                    ignore_attribute, ignore)
            return new_obj
    # It's not a standard type, so it needs __jsonclass__
    module_name = inspect.getmodule(obj).__name__
    class_name = obj.__class__.__name__
    json_class = class_name
    # Qualify with the module path unless defined at top level / __main__.
    if module_name not in ['', '__main__']:
        json_class = '%s.%s' % (module_name, json_class)
    return_obj = {"__jsonclass__":[json_class,]}
    # If a serialization method is defined..
    if serialize_method in dir(obj):
        # Params can be a dict (keyword) or list (positional)
        # Attrs MUST be a dict.
        serialize = getattr(obj, serialize_method)
        params, attrs = serialize()
        return_obj['__jsonclass__'].append(params)
        return_obj.update(attrs)
        return return_obj
    # Otherwise, try to figure it out
    # Obviously, we can't assume to know anything about the
    # parameters passed to __init__
    return_obj['__jsonclass__'].append([])
    attrs = {}
    # Skip attributes listed in the object's own ignore list or in *ignore*,
    # and anything whose type the wire format cannot carry.
    ignore_list = getattr(obj, ignore_attribute, [])+ignore
    for attr_name, attr_value in obj.__dict__.iteritems():
        if type(attr_value) in supported_types and \
                attr_name not in ignore_list and \
                attr_value not in ignore_list:
            attrs[attr_name] = dump(attr_value, serialize_method,
                                    ignore_attribute, ignore)
    return_obj.update(attrs)
    return return_obj
def load(obj):
    """Reconstruct a Python value from its JSON-RPC representation.

    Scalars pass through; lists and plain dicts are rebuilt recursively.
    A dict carrying a "__jsonclass__" entry is instantiated from the named
    class with the recorded constructor params, then remaining keys are
    set as attributes.  Raises TranslationError when the class reference
    is empty, unsafe, unknown, or unimportable.
    """
    if type(obj) in string_types+numeric_types+value_types:
        return obj
    if type(obj) is types.ListType:
        return_list = []
        for entry in obj:
            return_list.append(load(entry))
        return return_list
    # Othewise, it's a dict type
    if '__jsonclass__' not in obj.keys():
        # Plain mapping: rebuild values recursively.
        return_dict = {}
        for key, value in obj.iteritems():
            new_value = load(value)
            return_dict[key] = new_value
        return return_dict
    # It's a dict, and it's a __jsonclass__
    orig_module_name = obj['__jsonclass__'][0]
    params = obj['__jsonclass__'][1]
    if orig_module_name == '':
        raise TranslationError('Module name empty.')
    # Sanitize the dotted path before importing: any disallowed character
    # rejects the whole reference (guards the __import__ below).
    json_module_clean = re.sub(invalid_module_chars, '', orig_module_name)
    if json_module_clean != orig_module_name:
        raise TranslationError('Module name %s has invalid characters.' %
                               orig_module_name)
    json_module_parts = json_module_clean.split('.')
    json_class = None
    if len(json_module_parts) == 1:
        # Local class name -- probably means it won't work
        if json_module_parts[0] not in config.classes.keys():
            raise TranslationError('Unknown class or module %s.' %
                                   json_module_parts[0])
        json_class = config.classes[json_module_parts[0]]
    else:
        # Qualified name: import the module tree, then resolve the class.
        json_class_name = json_module_parts.pop()
        json_module_tree = '.'.join(json_module_parts)
        try:
            temp_module = __import__(json_module_tree)
        except ImportError:
            raise TranslationError('Could not import %s from module %s.' %
                                   (json_class_name, json_module_tree))
        json_class = getattr(temp_module, json_class_name)
    # Creating the object...
    new_obj = None
    if type(params) is types.ListType:
        new_obj = json_class(*params)
    elif type(params) is types.DictType:
        new_obj = json_class(**params)
    else:
        raise TranslationError('Constructor args must be a dict or list.')
    for key, value in obj.iteritems():
        if key == '__jsonclass__':
            continue
        # NOTE(review): attribute values are assigned raw, not load()ed, so
        # nested __jsonclass__ payloads inside attributes stay as dicts --
        # confirm whether this is intentional.
        setattr(new_obj, key, value)
    return new_obj
| gpl-3.0 |
MTDEV-KERNEL/abenagiel-android_kernel_fih_msm7x30 | scripts/build-all.py | 282 | 8889 | #! /usr/bin/env python
# Copyright (c) 2009, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
import os
import os.path
import shutil
import subprocess
import sys

from optparse import OptionParser
version = 'build-all.py, version 0.01'
# All targets are built out-of-tree, side by side under this directory.
build_dir = '../all-kernels'
# Default make goals; main() may replace or extend this list (-j/-l/--oldconfig).
make_command = ["vmlinux", "modules"]
# NOTE(review): this *aliases* os.environ (no copy), so the update below also
# mutates this process's environment; use dict(os.environ) if isolation is
# ever needed.
make_env = os.environ
make_env.update({
        'ARCH': 'arm',
        'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
        'KCONFIG_NOTIMESTAMP': 'true' })
# Parsed command-line options; replaced by the OptionParser result in main().
all_options = {}
def error(msg):
    """Write *msg* to stderr as an "error:" line without exiting."""
    text = "error: %s\n" % msg
    sys.stderr.write(text)
def fail(msg):
    """Fail with a user-printed message"""
    error(msg)
    # Exit non-zero so the calling shell / CI sees the failure.
    sys.exit(1)
def check_kernel():
    """Ensure that PWD is a kernel directory"""
    # MAINTAINERS plus the MSM Kconfig is used as a cheap fingerprint for
    # "this looks like an MSM kernel tree"; abort otherwise.
    if (not os.path.isfile('MAINTAINERS') or
            not os.path.isfile('arch/arm/mach-msm/Kconfig')):
        fail("This doesn't seem to be an MSM kernel dir")
def check_build():
    """Ensure that the build directory is present, creating it if needed.

    Tolerates a concurrent creator: if another process makes the directory
    between the isdir() check and makedirs(), the EEXIST error is ignored;
    any other OSError is re-raised.
    """
    # BUG FIX: `errno` was referenced here but never imported, so the
    # EEXIST check raised NameError exactly when it was needed (now imported
    # at module level).
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise
def update_config(file, str):
    """Append the option line *str* to the defconfig at *file*.

    NOTE: Python 2 module (print statement); the parameters `file` and
    `str` shadow builtins of the same names.
    """
    print 'Updating %s with \'%s\'\n' % (file, str)
    defconfig = open(file, 'a')
    defconfig.write(str + '\n')
    defconfig.close()
def scan_configs():
    """Get the full list of defconfigs appropriate for this tree.

    Returns a dict mapping each target's short name to its defconfig path.
    """
    patterns = ('arch/arm/configs/msm[0-9]*_defconfig',
                'arch/arm/configs/qsd*_defconfig')
    names = {}
    for pattern in patterns:
        for path in glob.glob(pattern):
            # Strip the trailing "_defconfig" (10 chars) for the target name.
            names[os.path.basename(path)[:-10]] = path
    return names
class Builder:
    """Runs a single make invocation, teeing its output to a log file.

    In non-verbose mode a progress dot is printed per output line, with a
    newline every 64 dots.  (Python 2 module: bare ``print`` statements.)
    """
    def __init__(self, logname):
        self.logname = logname
        self.fd = open(logname, 'w')
    def run(self, args):
        """Run *args* as a subprocess; return its exit status."""
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                                env=make_env,
                                bufsize=0,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        count = 0
        # for line in proc.stdout:
        # Read the raw fd directly (unbuffered) so output appears promptly.
        rawfd = proc.stdout.fileno()
        while True:
            line = os.read(rawfd, 1024)
            if not line:
                break
            # Always log; echo or show progress dots depending on -v.
            self.fd.write(line)
            self.fd.flush()
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print
                    sys.stdout.write('.')
                    sys.stdout.flush()
        print
        result = proc.wait()
        self.fd.close()
        return result
# Targets that failed when running with --keep-going; reported at the end.
failed_targets = []
def build(target):
    """Configure and build one *target* in its own output directory."""
    dest_dir = os.path.join(build_dir, target)
    log_name = '%s/log-%s.log' % (build_dir, target)
    print 'Building %s in %s log %s' % (target, dest_dir, log_name)
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    defconfig = 'arch/arm/configs/%s_defconfig' % target
    dotconfig = '%s/.config' % dest_dir
    shutil.copyfile(defconfig, dotconfig)
    # Run the configure step out-of-tree (make O=<dir> <target>_defconfig).
    devnull = open('/dev/null', 'r')
    subprocess.check_call(['make', 'O=%s' % dest_dir,
        '%s_defconfig' % target], env=make_env, stdin=devnull)
    devnull.close()
    # With --updateconfigs we only reconfigure; skip the actual build.
    if not all_options.updateconfigs:
        build = Builder(log_name)
        result = build.run(['make', 'O=%s' % dest_dir] + make_command)
        if result != 0:
            # --keep-going records the failure and continues; otherwise abort.
            if all_options.keep_going:
                failed_targets.append(target)
                fail_or_error = error
            else:
                fail_or_error = fail
            fail_or_error("Failed to build %s, see %s" % (target, build.logname))
    # Copy the defconfig back.
    if all_options.configs or all_options.updateconfigs:
        shutil.copyfile(dotconfig, defconfig)
def build_many(allconf, targets):
    """Build each target in *targets*, honoring --updateconfigs/--keep-going."""
    print "Building %d target(s)" % len(targets)
    for target in targets:
        if all_options.updateconfigs:
            # Append the requested config line before reconfiguring.
            update_config(allconf[target], all_options.updateconfigs)
        build(target)
    # Only non-empty when running with --keep-going; summarize and exit(1).
    if failed_targets:
        fail('\n '.join(["Failed targets:"] +
            [target for target in failed_targets]))
def main():
    """Entry point: parse options and build the requested defconfig targets."""
    check_kernel()
    check_build()
    configs = scan_configs()
    usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
            dest='configs',
            help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
            dest='list',
            help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
            dest='verbose',
            help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
            dest='oldconfig',
            help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
            dest='updateconfigs',
            help="Update defconfigs with provided option setting, "
                 "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
            help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
            dest='load_average',
            help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
            dest='keep_going', default=False,
            help="Keep building other targets if a target fails")
    (options, args) = parser.parse_args()
    # Publish the parsed options to the helpers above.
    global all_options
    all_options = options
    if options.list:
        print "Available targets:"
        for target in configs.keys():
            print " %s" % target
        sys.exit(0)
    if options.oldconfig:
        # --oldconfig replaces the build goals entirely.
        global make_command
        make_command = ["oldconfig"]
    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)
    # Dispatch on the positional arguments: all / perf / noperf / explicit list.
    if args == ['all']:
        build_many(configs, configs.keys())
    elif args == ['perf']:
        targets = []
        for t in configs.keys():
            if "perf" in t:
                targets.append(t)
        build_many(configs, targets)
    elif args == ['noperf']:
        targets = []
        for t in configs.keys():
            if "perf" not in t:
                targets.append(t)
        build_many(configs, targets)
    elif len(args) > 0:
        # Explicit targets: validate each against the scanned defconfigs.
        targets = []
        for t in args:
            if t not in configs.keys():
                parser.error("Target '%s' not one of %s" % (t, configs.keys()))
            targets.append(t)
        build_many(configs, targets)
    else:
        parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
    main()
| gpl-2.0 |
miing/mci_migo_packages_gargoyle | docs/django_settings.py | 9 | 2903 | import os.path
import sys
# Django settings for example_project project.
# Minimal settings used by the docs build / example project.

DEBUG = True
TEMPLATE_DEBUG = True

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

INTERNAL_IPS = ('127.0.0.1',)

MANAGERS = ADMINS

# Make the repository root importable so the in-tree `gargoyle` package is
# used rather than any installed copy.
PROJECT_ROOT = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(os.path.join(PROJECT_ROOT, '..')))

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'gargoyle', # Or path to database file if using sqlite3.
        'USER': 'postgres', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin/media/'

# Make this unique, and don't share it with anybody.
# NOTE(review): a hard-coded key is tolerable only because this is an
# example/docs settings file; never reuse it in a deployed project.
SECRET_KEY = ')*)&8a36)6%74e@-ne5(-!8a(vv#tkv)(eyg&@0=zd^pl!7=y@'

ROOT_URLCONF = 'example_project.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(PROJECT_ROOT, 'templates'),
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'nexus',
    'gargoyle',
    'south',
)
| apache-2.0 |
darktears/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/coverage/backward.py | 64 | 4324 | """Add things to old Pythons so I can pretend they are newer."""
# This file does lots of tricky stuff, so disable a bunch of lintisms.
# pylint: disable=F0401,W0611,W0622
# F0401: Unable to import blah
# W0611: Unused import blah
# W0622: Redefining built-in blah
import os, sys
# Python 2.3 doesn't have `set`
try:
    set = set  # new in 2.4
except NameError:
    from sets import Set as set

# Python 2.3 doesn't have `sorted`.
try:
    sorted = sorted
except NameError:
    def sorted(iterable):
        """A 2.3-compatible implementation of `sorted`."""
        lst = list(iterable)
        lst.sort()
        return lst

# Pythons 2 and 3 differ on where to get StringIO.
# On Py2 the C-accelerated cStringIO handles both text and bytes.
try:
    from cStringIO import StringIO
    BytesIO = StringIO
except ImportError:
    from io import StringIO, BytesIO

# What's a string called?
try:
    string_class = basestring
except NameError:
    string_class = str

# Where do pickles come from?
try:
    import cPickle as pickle
except ImportError:
    import pickle

# range or xrange?
try:
    range = xrange
except NameError:
    range = range
# Exec is a statement in Py2, a function in Py3
if sys.version_info >= (3, 0):
    def exec_code_object(code, global_map):
        """A wrapper around exec()."""
        exec(code, global_map)
else:
    # OK, this is pretty gross. In Py2, exec was a statement, but that will
    # be a syntax error if we try to put it in a Py3 file, even if it is never
    # executed. So hide it inside an evaluated string literal instead.
    eval(
        compile(
            "def exec_code_object(code, global_map):\n"
            " exec code in global_map\n",
            "<exec_function>", "exec"
            )
        )
# ConfigParser was renamed to the more-standard configparser
try:
    import configparser
except ImportError:
    import ConfigParser as configparser

# Python 3.2 provides `tokenize.open`, the best way to open source files.
# Fall back through feature detection: tokenize.open (3.2+), then
# tokenize.detect_encoding (3.0/3.1), then universal-newline text mode (2.x).
import tokenize
try:
    open_source = tokenize.open  # pylint: disable=E1101
except AttributeError:
    try:
        detect_encoding = tokenize.detect_encoding  # pylint: disable=E1101
    except AttributeError:
        def open_source(fname):
            """Open a source file the best way."""
            return open(fname, "rU")
    else:
        from io import TextIOWrapper
        # Copied from the 3.2 stdlib:
        def open_source(fname):
            """Open a file in read only mode using the encoding detected by
            detect_encoding().
            """
            buffer = open(fname, 'rb')
            encoding, _ = detect_encoding(buffer.readline)
            # Rewind so the caller sees the file from the start.
            buffer.seek(0)
            text = TextIOWrapper(buffer, encoding, line_buffering=True)
            text.mode = 'r'
            return text
# Python 3.x is picky about bytes and strings, so provide methods to
# get them right, and make them no-ops in 2.x
if sys.version_info >= (3, 0):
    def to_bytes(s):
        """Convert string `s` to bytes."""
        return s.encode('utf8')

    def to_string(b):
        """Convert bytes `b` to a string."""
        return b.decode('utf8')
else:
    def to_bytes(s):
        """Convert string `s` to bytes (no-op in 2.x)."""
        return s

    def to_string(b):
        """Convert bytes `b` to a string (no-op in 2.x)."""
        return b

# A few details about writing encoded text are different in 2.x and 3.x.
if sys.version_info >= (3, 0):
    def write_encoded(fname, text, encoding='utf8', errors='strict'):
        '''Write string `text` to file names `fname`, with encoding.'''
        # Don't use "with", so that this file is still good for old 2.x.
        f = open(fname, 'w', encoding=encoding, errors=errors)
        try:
            f.write(text)
        finally:
            f.close()
else:
    # It's not clear that using utf8 strings in 2.x is the right thing to do.
    def write_encoded(fname, text, encoding='utf8', errors='strict'):
        '''Write utf8 string `text` to file names `fname`, with encoding.'''
        import codecs
        f = codecs.open(fname, 'w', encoding=encoding, errors=errors)
        try:
            f.write(text.decode('utf8'))
        finally:
            f.close()

# Md5 is available in different places.
try:
    import hashlib
    md5 = hashlib.md5
except ImportError:
    import md5
    md5 = md5.new
| bsd-3-clause |
horacioMartinez/dakara-client | tools/misc/traductor de indices y mapas/mapas/conversor_mapas.py | 1 | 3414 | import json
import struct
import os
# Lookup table: extra-data string per map number (index = map number).
# Entries default to 0 for maps with no extra data.
extra_info = [0 for x in range(10000)]
# Each line of ./extra_data is "mapNumber=payload".
# NOTE(review): this file handle is never closed; consider closing it after
# the loop (or using a with-block) -- TODO confirm intent.
extra_data = open("./extra_data", "r")
for line in extra_data:
    line = line.rstrip()
    numeroMapa = line.split('=')[0]
    data = line.split('=')[1]
    extra_info[int(numeroMapa)] = data
def getExtraData(mapa):
    """Look up the extra-data entry recorded for map number *mapa*."""
    entry = extra_info[mapa]
    return entry
# Convert every *.map binary file in the current directory to a JSON file
# under mapas_json/.  Each tile is read as a flags byte plus optional
# little-endian uint16 layers/trigger, then emitted as sparse JSON objects.
for fn in os.listdir('.'):
    if not os.path.isfile(fn):
        continue
    if not fn.endswith(".map"):
        continue
    origen = open(fn, "rb")
    # Map number is the part of the filename between "mapa" and the ".".
    numeroMapa = fn[4:fn.find('.')]
    fileDest = "mapas_json/" + "mapa" + numeroMapa + '.json'
    destino = open(fileDest,"w")
    #if fileDest != "mapas_json/mapa1.json":
    #    continue
    print fileDest
    origen.read(256+17) # skip the file header
    # mapa[row][col] holds [blocked, layer1..layer4, trigger] per tile.
    mapa = [[[0 for y in range(6)] for y in range(101)] for y in range(101)]
    x = 1
    while ( x < 101):
        y = 1
        while (y < 101):
            flags = struct.unpack('<B', (origen.read(1)))[0] # layer-presence flags
            # Bit 0: tile is blocked.
            if (flags & 1):
                bloqueado=1
            else:
                bloqueado=0
            # Layer 1 is always present; layers 2-4 only when flagged.
            layer1 = struct.unpack('<H', (origen.read(2)))[0] # l1
            if (flags & 2):
                layer2 = struct.unpack('<H', (origen.read(2)))[0] # l2
            else:
                layer2=0
            if (flags & 4):
                layer3 = struct.unpack('<H', (origen.read(2)))[0] # l3
            else:
                layer3=0
            if (flags & 8):
                layer4 = struct.unpack('<H', (origen.read(2)))[0] # l4
            else:
                layer4=0
            # Bit 4: trigger word; only values 1 and 4 are kept.
            if (flags & 16):
                trigger = struct.unpack('<H', (origen.read(2)))[0] # trigger
                if ( (trigger != 1) and (trigger != 4) ):
                    trigger = 0
            else:
                trigger=0
            # NOTE(review): the read loop stores mapa[y][x] while the write
            # loop below reads mapa[x][y] -- looks like a deliberate
            # transpose, but confirm against the map orientation.
            mapa[y][x][0] = bloqueado
            mapa[y][x][1] = layer1
            mapa[y][x][2] = layer2
            mapa[y][x][3] = layer3
            mapa[y][x][4] = layer4
            mapa[y][x][5] = trigger
            y = y+1
        x = x+1
    x = 1
    # extra data ###################### disabled #############################
    # NOTE(review): with the opening "{" write commented out, the file starts
    # with "," and is not valid JSON -- verify whether a post-processing step
    # prepends the header or whether this is a latent bug.
    #destino.write("{")
    #destino.write(getExtraData(int(numeroMapa)))
    # Layers
    destino.write(",")
    destino.write('"layers":')
    destino.write("[")
    while ( x < 101):
        y = 1
        destino.write("[")
        while (y < 101):
            # Emit each tile as a sparse object: only non-zero fields are
            # written, each preceded by a comma when an earlier field exists.
            destino.write("{")
            mapa[x][y][0] = mapa[x][y][0]
            if mapa[x][y][0] != 0:
                destino.write(""""0":""")
                destino.write(str(mapa[x][y][0]))
            #mapa[x][y][1] = mapa[x][y][1]
            if mapa[x][y][1] != 0:
                if mapa[x][y][0] != 0:
                    destino.write(",")
                destino.write(""""1":""")
                destino.write(str(mapa[x][y][1]))
            #mapa[x][y][2] = mapa[x][y][2]
            if mapa[x][y][2] != 0:
                if ( (mapa[x][y][0] != 0) or (mapa[x][y][1] != 0)):
                    destino.write(",")
                destino.write(""""2":""")
                destino.write(str(mapa[x][y][2]))
            #mapa[x][y][3] = mapa[x][y][3]
            if mapa[x][y][3] != 0:
                if ( (mapa[x][y][0] != 0 or mapa[x][y][1] != 0) or mapa[x][y][2] != 0):
                    destino.write(",")
                destino.write(""""3":""")
                destino.write(str(mapa[x][y][3]))
            #mapa[x][y][4] = layer4
            if mapa[x][y][4] != 0:
                if ( ((mapa[x][y][0] != 0 or mapa[x][y][1] != 0) or mapa[x][y][2] != 0) or mapa[x][y][3] != 0):
                    destino.write(",")
                destino.write(""""4":""")
                destino.write(str(mapa[x][y][4]))
            if mapa[x][y][5] != 0:
                if ((((mapa[x][y][0] != 0 or mapa[x][y][1] != 0) or mapa[x][y][2] != 0) or mapa[x][y][3] != 0) or mapa[x][y][4] != 0):
                    destino.write(",")
                destino.write(""""5":""")
                destino.write(str(mapa[x][y][5]))
            # Close the tile object; omit the trailing comma on the last column.
            if y==100:
                destino.write("}")
            else:
                destino.write("},")
            y = y+1
        # Close the row array; omit the trailing comma on the last row.
        if x==100:
            destino.write("]")
        else:
            destino.write("],")
        x = x+1
    destino.write("]")
    destino.write("}")
| mit |
anryko/ansible | lib/ansible/modules/network/vyos/vyos_interfaces.py | 20 | 27002 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for vyos_interfaces
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'
}
DOCUMENTATION = """
---
module: vyos_interfaces
version_added: 2.9
short_description: Manages interface attributes of VyOS network devices.
description:
- This module manages the interface attributes on VyOS network devices.
- This module supports managing base attributes of Ethernet, Bonding,
VXLAN, Loopback and Virtual Tunnel Interfaces.
notes:
- Tested against VyOS 1.1.8 (helium).
- This module works with connection C(network_cli). See L(the VyOS OS Platform Options,../network/user_guide/platform_vyos.html).
author: Nilashish Chakraborty (@nilashishc)
options:
config:
description: The provided interfaces configuration.
type: list
suboptions:
name:
description:
- Full name of the interface, e.g. eth0, eth1, bond0, vti1, vxlan2.
type: str
required: True
description:
description:
- Interface description.
type: str
duplex:
description:
- Interface duplex mode.
- Applicable for Ethernet interfaces only.
choices: ['full', 'half', 'auto']
type: str
enabled:
default: True
description:
- Administrative state of the interface.
- Set the value to C(true) to administratively enable
the interface or C(false) to disable it.
type: bool
mtu:
description:
- MTU for a specific interface. Refer to vendor documentation for valid values.
- Applicable for Ethernet, Bonding, VXLAN and Virtual Tunnel interfaces.
type: int
speed:
description:
- Interface link speed.
- Applicable for Ethernet interfaces only.
type: str
choices: ['auto', '10', '100', '1000', '2500', '10000']
vifs:
description:
- Virtual sub-interfaces related configuration.
- 802.1Q VLAN interfaces are represented as virtual sub-interfaces in VyOS.
type: list
suboptions:
vlan_id:
description:
- Identifier for the virtual sub-interface.
type: int
description:
description:
- Virtual sub-interface description.
type: str
enabled:
description:
- Administrative state of the virtual sub-interface.
- Set the value to C(true) to administratively enable
the interface or C(false) to disable it.
type: bool
default: True
mtu:
description:
- MTU for the virtual sub-interface.
- Refer to vendor documentation for valid values.
type: int
state:
description:
- The state of the configuration after module completion.
type: str
choices:
- merged
- replaced
- overridden
- deleted
default: merged
"""
EXAMPLES = """
# Using merged
#
# -------------
# Before state:
# -------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces loopback lo
- name: Merge provided configuration with device configuration
vyos_interfaces:
config:
- name: eth2
description: 'Configured by Ansible'
enabled: True
vifs:
- vlan_id: 200
description: "VIF 200 - ETH2"
- name: eth3
description: 'Configured by Ansible'
mtu: 1500
- name: bond1
description: 'Bond - 1'
mtu: 1200
- name: vti2
description: 'VTI - 2'
enabled: false
state: merged
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
# "before": [
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "enabled": true,
# "name": "eth3"
# },
# {
# "enabled": true,
# "name": "eth2"
# },
# {
# "enabled": true,
# "name": "eth1"
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
# "commands": [
# "set interfaces ethernet eth2 description 'Configured by Ansible'",
# "set interfaces ethernet eth2 vif 200",
# "set interfaces ethernet eth2 vif 200 description 'VIF 200 - ETH2'",
# "set interfaces ethernet eth3 description 'Configured by Ansible'",
# "set interfaces ethernet eth3 mtu '1500'",
# "set interfaces bonding bond1",
# "set interfaces bonding bond1 description 'Bond - 1'",
# "set interfaces bonding bond1 mtu '1200'",
# "set interfaces vti vti2",
# "set interfaces vti vti2 description 'VTI - 2'",
# "set interfaces vti vti2 disable"
# ]
#
# "after": [
# {
# "description": "Bond - 1",
# "enabled": true,
# "mtu": 1200,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "VTI - 2",
# "enabled": false,
# "name": "vti2"
# },
# {
# "description": "Configured by Ansible",
# "enabled": true,
# "mtu": 1500,
# "name": "eth3"
# },
# {
# "description": "Configured by Ansible",
# "enabled": true,
# "name": "eth2",
# "vifs": [
# {
# "description": "VIF 200 - ETH2",
# "enabled": true,
# "vlan_id": "200"
# }
# ]
# },
# {
# "enabled": true,
# "name": "eth1"
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# -------------
# After state:
# -------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces bonding bond1 description 'Bond - 1'
# set interfaces bonding bond1 mtu '1200'
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth2 vif 200 description 'VIF 200 - ETH2'
# set interfaces ethernet eth3 description 'Configured by Ansible'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 mtu '1500'
# set interfaces loopback lo
# set interfaces vti vti2 description 'VTI - 2'
# set interfaces vti vti2 disable
#
# Using replaced
#
# -------------
# Before state:
# -------------
#
# vyos:~$ show configuration commands | grep eth
# set interfaces bonding bond1 description 'Bond - 1'
# set interfaces bonding bond1 mtu '1400'
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 description 'Management Interface for the Appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:f3:6c:b5'
# set interfaces ethernet eth0 smp_affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible Eng Team'
# set interfaces ethernet eth1 duplex 'full'
# set interfaces ethernet eth1 hw-id '08:00:27:ad:ef:65'
# set interfaces ethernet eth1 smp_affinity 'auto'
# set interfaces ethernet eth1 speed '100'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 duplex 'full'
# set interfaces ethernet eth2 hw-id '08:00:27:ab:4e:79'
# set interfaces ethernet eth2 mtu '500'
# set interfaces ethernet eth2 smp_affinity 'auto'
# set interfaces ethernet eth2 speed '100'
# set interfaces ethernet eth2 vif 200 description 'Configured by Ansible'
# set interfaces ethernet eth3 description 'Configured by Ansible'
# set interfaces ethernet eth3 duplex 'full'
# set interfaces ethernet eth3 hw-id '08:00:27:17:3c:85'
# set interfaces ethernet eth3 mtu '1500'
# set interfaces ethernet eth3 smp_affinity 'auto'
# set interfaces ethernet eth3 speed '100'
# set interfaces loopback lo
#
#
- name: Replace device configurations of listed interfaces with provided configurations
vyos_interfaces:
config:
- name: eth2
description: "Replaced by Ansible"
- name: eth3
description: "Replaced by Ansible"
- name: eth1
description: "Replaced by Ansible"
state: replaced
#
#
# -----------------------
# Module Execution Result
# -----------------------
#
# "before": [
# {
# "description": "Bond - 1",
# "enabled": true,
# "mtu": 1400,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "Configured by Ansible",
# "duplex": "full",
# "enabled": true,
# "mtu": 1500,
# "name": "eth3",
# "speed": "100"
# },
# {
# "description": "Configured by Ansible",
# "duplex": "full",
# "enabled": true,
# "mtu": 500,
# "name": "eth2",
# "speed": "100",
# "vifs": [
# {
# "description": "VIF 200 - ETH2",
# "enabled": true,
# "vlan_id": "200"
# }
# ]
# },
# {
# "description": "Configured by Ansible Eng Team",
# "duplex": "full",
# "enabled": true,
# "name": "eth1",
# "speed": "100"
# },
# {
# "description": "Management Interface for the Appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
# "commands": [
# "delete interfaces ethernet eth2 speed",
# "delete interfaces ethernet eth2 duplex",
# "delete interfaces ethernet eth2 mtu",
# "delete interfaces ethernet eth2 vif 200 description",
# "set interfaces ethernet eth2 description 'Replaced by Ansible'",
# "delete interfaces ethernet eth3 speed",
# "delete interfaces ethernet eth3 duplex",
# "delete interfaces ethernet eth3 mtu",
# "set interfaces ethernet eth3 description 'Replaced by Ansible'",
# "delete interfaces ethernet eth1 speed",
# "delete interfaces ethernet eth1 duplex",
# "set interfaces ethernet eth1 description 'Replaced by Ansible'"
# ]
#
# "after": [
# {
# "description": "Bond - 1",
# "enabled": true,
# "mtu": 1400,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "Replaced by Ansible",
# "enabled": true,
# "name": "eth3"
# },
# {
# "description": "Replaced by Ansible",
# "enabled": true,
# "name": "eth2",
# "vifs": [
# {
# "enabled": true,
# "vlan_id": "200"
# }
# ]
# },
# {
# "description": "Replaced by Ansible",
# "enabled": true,
# "name": "eth1"
# },
# {
# "description": "Management Interface for the Appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# -------------
# After state:
# -------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces bonding bond1 description 'Bond - 1'
# set interfaces bonding bond1 mtu '1400'
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Management Interface for the Appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Replaced by Ansible'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth2 description 'Replaced by Ansible'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth2 vif 200
# set interfaces ethernet eth3 description 'Replaced by Ansible'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces loopback lo
#
#
# Using overridden
#
#
# --------------
# Before state
# --------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Ethernet Interface - 0'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 mtu '1200'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible Eng Team'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 mtu '100'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth1 vif 100 description 'VIF 100 - ETH1'
# set interfaces ethernet eth1 vif 100 disable
# set interfaces ethernet eth2 description 'Configured by Ansible Team (Admin Down)'
# set interfaces ethernet eth2 disable
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 mtu '600'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth3 description 'Configured by Ansible Network'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces loopback lo
# set interfaces vti vti1 description 'Virtual Tunnel Interface - 1'
# set interfaces vti vti1 mtu '68'
#
#
- name: Overrides all device configuration with provided configuration
vyos_interfaces:
config:
- name: eth0
description: Outbound Interface For The Appliance
speed: auto
duplex: auto
- name: eth2
speed: auto
duplex: auto
- name: eth3
mtu: 1200
state: overridden
#
#
# ------------------------
# Module Execution Result
# ------------------------
#
# "before": [
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "Virtual Tunnel Interface - 1",
# "enabled": true,
# "mtu": 68,
# "name": "vti1"
# },
# {
# "description": "Configured by Ansible Network",
# "enabled": true,
# "name": "eth3"
# },
# {
# "description": "Configured by Ansible Team (Admin Down)",
# "enabled": false,
# "mtu": 600,
# "name": "eth2"
# },
# {
# "description": "Configured by Ansible Eng Team",
# "enabled": true,
# "mtu": 100,
# "name": "eth1",
# "vifs": [
# {
# "description": "VIF 100 - ETH1",
# "enabled": false,
# "vlan_id": "100"
# }
# ]
# },
# {
# "description": "Ethernet Interface - 0",
# "duplex": "auto",
# "enabled": true,
# "mtu": 1200,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
# "commands": [
# "delete interfaces vti vti1 description",
# "delete interfaces vti vti1 mtu",
# "delete interfaces ethernet eth1 description",
# "delete interfaces ethernet eth1 mtu",
# "delete interfaces ethernet eth1 vif 100 description",
# "delete interfaces ethernet eth1 vif 100 disable",
# "delete interfaces ethernet eth0 mtu",
# "set interfaces ethernet eth0 description 'Outbound Interface For The Appliance'",
# "delete interfaces ethernet eth2 description",
# "delete interfaces ethernet eth2 mtu",
# "set interfaces ethernet eth2 duplex 'auto'",
# "delete interfaces ethernet eth2 disable",
# "set interfaces ethernet eth2 speed 'auto'",
# "delete interfaces ethernet eth3 description",
# "set interfaces ethernet eth3 mtu '1200'"
# ],
#
# "after": [
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "enabled": true,
# "name": "vti1"
# },
# {
# "enabled": true,
# "mtu": 1200,
# "name": "eth3"
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth2",
# "speed": "auto"
# },
# {
# "enabled": true,
# "name": "eth1",
# "vifs": [
# {
# "enabled": true,
# "vlan_id": "100"
# }
# ]
# },
# {
# "description": "Outbound Interface For The Appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# ------------
# After state
# ------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Outbound Interface For The Appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth1 vif 100
# set interfaces ethernet eth2 duplex 'auto'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth2 speed 'auto'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 mtu '1200'
# set interfaces loopback lo
# set interfaces vti vti1
#
#
# Using deleted
#
#
# -------------
# Before state
# -------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces bonding bond0 mtu '1300'
# set interfaces bonding bond1 description 'LAG - 1'
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Outbound Interface for this appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible Network'
# set interfaces ethernet eth1 duplex 'full'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth1 speed '100'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 disable
# set interfaces ethernet eth2 duplex 'full'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 mtu '600'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth2 speed '100'
# set interfaces ethernet eth3 description 'Configured by Ansible Network'
# set interfaces ethernet eth3 duplex 'full'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 speed '100'
# set interfaces loopback lo
#
#
- name: Delete attributes of given interfaces (Note - This won't delete the interfaces themselves)
vyos_interfaces:
config:
- name: bond1
- name: eth1
- name: eth2
- name: eth3
state: deleted
#
#
# ------------------------
# Module Execution Results
# ------------------------
#
# "before": [
# {
# "enabled": true,
# "mtu": 1300,
# "name": "bond0"
# },
# {
# "description": "LAG - 1",
# "enabled": true,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "Configured by Ansible Network",
# "duplex": "full",
# "enabled": true,
# "name": "eth3",
# "speed": "100"
# },
# {
# "description": "Configured by Ansible",
# "duplex": "full",
# "enabled": false,
# "mtu": 600,
# "name": "eth2",
# "speed": "100"
# },
# {
# "description": "Configured by Ansible Network",
# "duplex": "full",
# "enabled": true,
# "name": "eth1",
# "speed": "100"
# },
# {
# "description": "Outbound Interface for this appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
# "commands": [
# "delete interfaces bonding bond1 description",
# "delete interfaces ethernet eth1 speed",
# "delete interfaces ethernet eth1 duplex",
# "delete interfaces ethernet eth1 description",
# "delete interfaces ethernet eth2 speed",
# "delete interfaces ethernet eth2 disable",
# "delete interfaces ethernet eth2 duplex",
# "delete interfaces ethernet eth2 disable",
# "delete interfaces ethernet eth2 description",
# "delete interfaces ethernet eth2 disable",
# "delete interfaces ethernet eth2 mtu",
# "delete interfaces ethernet eth2 disable",
# "delete interfaces ethernet eth3 speed",
# "delete interfaces ethernet eth3 duplex",
# "delete interfaces ethernet eth3 description"
# ]
#
# "after": [
# {
# "enabled": true,
# "mtu": 1300,
# "name": "bond0"
# },
# {
# "enabled": true,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "enabled": true,
# "name": "eth3"
# },
# {
# "enabled": true,
# "name": "eth2"
# },
# {
# "enabled": true,
# "name": "eth1"
# },
# {
# "description": "Outbound Interface for this appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# ------------
# After state
# ------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces bonding bond0 mtu '1300'
# set interfaces bonding bond1
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Outbound Interface for this appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces loopback lo
#
#
"""
RETURN = """
before:
description: The configuration as structured data prior to module invocation.
returned: always
sample: >
The configuration returned will always be in the same format
of the parameters above.
type: list
after:
description: The configuration as structured data after module completion.
returned: when changed
sample: >
The configuration returned will always be in the same format
of the parameters above.
type: list
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample:
- 'set interfaces ethernet eth1 mtu 1200'
- 'set interfaces ethernet eth2 vif 100 description VIF 100'
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.vyos.argspec.interfaces.interfaces import InterfacesArgs
from ansible.module_utils.network.vyos.config.interfaces.interfaces import Interfaces
def main():
    """
    Main entry point for module execution.

    Builds the AnsibleModule from the generated interfaces argument spec
    (with check mode supported), runs the Interfaces resource-module
    workflow, and exits the module with its result.

    :returns: the result from module invocation
    """
    module = AnsibleModule(argument_spec=InterfacesArgs.argument_spec,
                           supports_check_mode=True)
    result = Interfaces(module).execute_module()
    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
peraktong/Cannon-Experiment | 0304_anna_cannon.py | 1 | 1152 | from TheCannon import apogee
from TheCannon import dataset
import numpy as np
from TheCannon import model
# --- Load APOGEE DR10 training spectra and reference labels ---
# NOTE(review): paths are hard-coded to one machine; parameterize before reuse.
tr_ID, wl, tr_flux, tr_ivar = apogee.load_spectra("/Users/caojunzhi/Downloads/example_DR10/Data")
tr_label = apogee.load_labels("/Users/caojunzhi/Downloads/example_DR10/reference_labels.csv")
# The training set doubles as the test set (self-validation run).
test_ID = tr_ID
test_flux = tr_flux
test_ivar = tr_ivar
ds = dataset.Dataset(wl, tr_ID, tr_flux, tr_ivar, tr_label, test_ID, test_flux, test_ivar)
# Labels fit by the model: effective temperature, log surface gravity, metallicity.
ds.set_label_names(['T_{eff}', '\log g', '[Fe/H]'])
#fig = ds.diagnostics_SNR()
#fig = ds.diagnostics_ref_labels()
# Pixel index ranges used for continuum fitting — presumably excluding the
# APOGEE detector gaps; TODO confirm against the wavelength grid.
ds.ranges = [[371,3192], [3697,5500], [5500,5997], [6461,8255]]
# Continuum normalization: pseudo-normalize with a 90th-percentile running
# quantile, pick continuum pixels, fit a sinusoid continuum, then normalize
# both the training and test flux with it.
pseudo_tr_flux, pseudo_tr_ivar = ds.continuum_normalize_training_q(q=0.90, delta_lambda=50)
contmask = ds.make_contmask(pseudo_tr_flux, pseudo_tr_ivar, frac=0.07)
ds.set_continuum(contmask)
cont = ds.fit_continuum(3, "sinusoid")
norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar = ds.continuum_normalize(cont)
print(np.mean(norm_test_ivar))
# Swap the normalized arrays into the dataset before training.
ds.tr_flux = norm_tr_flux
ds.tr_ivar = norm_tr_ivar
ds.test_flux = norm_test_flux
ds.test_ivar = norm_test_ivar
# Train a quadratic (order-2) Cannon model.
md = model.CannonModel(2)
md.fit(ds)
# inf flux:
| mit |
aysenurbilgin/tilt_api | src/engine/distributionalsemantics/ModelConstants.py | 1 | 5884 | import gensim
import numpy as np
from scipy.stats.stats import spearmanr
# global parameters for word2vec
ALPHA = 0.01 # initial learning rate, drops to min_alpha
MIN_ALPHA = 0.0001
CBOW_MEAN = 1 # http://stackoverflow.com/questions/34249586/the-accuracy-test-of-word2vec-in-gensim
class ModelAlignment:
    """Align two gensim word2vec models into a shared vector space via
    orthogonal Procrustes, after intersecting their vocabularies."""

    def __init__(self):
        pass

    def smart_procrustes_align_gensim(self, base_embed, other_embed, words=None):
        """Procrustes align two gensim word2vec models (to allow for comparison between same word across models).
        Code ported from HistWords <https://github.com/williamleif/histwords> by William Hamilton <wleif@stanford.edu>.
        (With help from William. Thank you!)

        First, intersect the vocabularies (see `intersection_align_gensim` documentation).
        Then do the alignment on the other_embed model.
        Replace the other_embed model's syn0 and syn0norm numpy matrices with the aligned version.
        Return other_embed.

        If `words` is set, intersect the two models' vocabulary with the vocabulary in words (see `intersection_align_gensim` documentation).
        """
        # make sure vocabulary and indices are aligned
        # in_base_embed, in_other_embed = ModelAlignment.intersection_align_gensim(base_embed, other_embed, words=words)
        in_base_embed, in_other_embed = self.intersection_align_gensim(base_embed, other_embed, words=words)

        # get the embedding matrices
        # NOTE(review): assumes syn0norm has been populated (init_sims()
        # called on both models beforehand) — confirm at call sites.
        base_vecs = in_base_embed.wv.syn0norm
        other_vecs = in_other_embed.wv.syn0norm

        # just a matrix dot product with numpy
        m = other_vecs.T.dot(base_vecs)
        # SVD method from numpy
        u, _, v = np.linalg.svd(m)
        # another matrix operation
        # ortho = u @ v is the orthogonal matrix minimizing the Frobenius
        # distance between the rotated other_vecs and base_vecs.
        ortho = u.dot(v)
        # Replace original array with modified one
        # i.e. multiplying the embedding matrix (syn0norm)by "ortho"
        # NOTE: this mutates other_embed in place as well as returning it.
        other_embed.wv.syn0norm = other_embed.wv.syn0 = (other_embed.wv.syn0norm).dot(ortho)
        return other_embed

    def intersection_align_gensim(self, m1, m2, words=None):
        """
        Intersect two gensim word2vec models, m1 and m2.
        Only the shared vocabulary between them is kept.
        If 'words' is set (as list or set), then the vocabulary is intersected with this list as well.
        Indices are re-organized from 0..N in order of descending frequency (=sum of counts from both m1 and m2).
        These indices correspond to the new syn0 and syn0norm objects in both gensim models:
            -- so that Row 0 of m1.syn0 will be for the same word as Row 0 of m2.syn0
            -- you can find the index of any word on the .index2word list: model.index2word.index(word) => 2
        The .vocab dictionary is also updated for each model, preserving the count but updating the index.

        Note: both models are modified IN PLACE (syn0/syn0norm, index2word
        and vocab are all rebuilt); the same objects are returned.
        """

        # Get the vocab for each model
        vocab_m1 = set(m1.wv.vocab.keys())
        vocab_m2 = set(m2.wv.vocab.keys())

        # Find the common vocabulary
        common_vocab = vocab_m1&vocab_m2
        if words: common_vocab&=set(words)

        # If no alignment necessary because vocab is identical...
        if not vocab_m1-common_vocab and not vocab_m2-common_vocab:
            return (m1,m2)

        # Otherwise sort by frequency (summed for both)
        common_vocab = list(common_vocab)
        common_vocab.sort(key=lambda w: m1.wv.vocab[w].count + m2.wv.vocab[w].count,reverse=True)

        # Then for each model...
        for m in [m1,m2]:
            # Replace old syn0norm array with new one (with common vocab)
            indices = [m.wv.vocab[w].index for w in common_vocab]
            old_arr = m.wv.syn0norm
            # Fall back to the raw vectors if init_sims() was never called.
            if old_arr is None:
                old_arr = m.wv.syn0
            new_arr = np.array([old_arr[index] for index in indices])
            m.wv.syn0norm = m.wv.syn0 = new_arr

            # Replace old vocab dictionary with new one (with common vocab)
            # and old index2word with new one
            m.wv.index2word = common_vocab
            old_vocab = m.wv.vocab
            new_vocab = {}
            for new_index,word in enumerate(common_vocab):
                old_vocab_obj=old_vocab[word]
                # Keep the original count, but point at the new row index.
                new_vocab[word] = gensim.models.word2vec.Vocab(index=new_index, count=old_vocab_obj.count)
            m.wv.vocab = new_vocab

        return (m1,m2)
class ModelSimilarity:
    """Evaluate a word-embedding model against a word-similarity benchmark."""

    def __init__(self):
        pass

    def calculateWordSimilarity(self, model, method_path):
        """Score *model* against the similarity test file at *method_path*.

        The file holds one "word1 word2 score" triple per line
        (case-insensitive). Returns a tuple
        ``(spearman_rho, used_pairs, total_pairs)``; pairs containing
        out-of-vocabulary words are skipped, and ``(None, None, None)``
        is returned when no pair could be scored.
        (Approach after the libskylark randsvd embeddings example.)
        """
        test_data = []
        with open(method_path) as f:
            for line in f:
                x, y, sim = line.strip().lower().split()
                # BUGFIX: parse the gold score as a number so spearmanr
                # ranks numerically (string comparison ranks '10' < '2').
                test_data.append(((x, y), float(sim)))

        results = []
        misses = 0
        for (x, y), sim in test_data:
            try:
                results.append((self.getSimilarityForWords(x, y, model), sim))
            except Exception:
                # Typically a KeyError for a word missing from the model.
                misses += 1

        # BUGFIX: the original called len() on a zip object (fails on
        # Python 3) and hit a NameError for an empty test file; an explicit
        # emptiness check expresses the same intent safely.
        if not results:
            return None, None, None

        actual, expected = zip(*results)
        total = len(test_data)
        used = total - misses
        return spearmanr(actual, expected)[0], used, total

    def getSimilarityForWords(self, word1, word2, model):
        """Return the dot product of the two words' embedding vectors.

        Raises whatever ``model.wv[...]`` raises (normally KeyError) for
        out-of-vocabulary words.
        """
        return np.dot(model.wv[word1], model.wv[word2])
datalogics/scons | test/SideEffect/basic.py | 2 | 3266 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify basic operation of the SideEffect() method, using a "log
file" as the side effect "target."
"""
import os.path
import string
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
def copy(source, target):
open(target, "wb").write(open(source, "rb").read())
def build(env, source, target):
copy(str(source[0]), str(target[0]))
if target[0].side_effects:
side_effect = open(str(target[0].side_effects[0]), "ab")
side_effect.write('%%s -> %%s\\n'%%(str(source[0]), str(target[0])))
Build = Builder(action=build)
env = Environment(BUILDERS={'Build':Build}, SUBDIR='subdir')
env.Build('foo.out', 'foo.in')
env.Build('bar.out', 'bar.in')
env.Build('blat.out', 'blat.in')
SideEffect('log.txt', ['foo.out', 'bar.out', 'blat.out'])
env.Build('log.out', 'log.txt')
env.Build('subdir/baz.out', 'baz.in')
env.SideEffect(r'%s', ['blat.out', r'%s'])
env.Build('subdir/out.out', 'subdir/out.txt')
""" % (os.path.join('$SUBDIR', 'out.txt'),
os.path.join('$SUBDIR', 'baz.out')))
test.write('foo.in', 'foo.in\n')
test.write('bar.in', 'bar.in\n')
test.write('blat.in', 'blat.in\n')
test.write('baz.in', 'baz.in\n')
test.run(arguments = 'foo.out bar.out', stdout=test.wrap_stdout("""\
build(["foo.out"], ["foo.in"])
build(["bar.out"], ["bar.in"])
"""))
expect = """\
foo.in -> foo.out
bar.in -> bar.out
"""
test.must_match('log.txt', expect)
test.write('bar.in', 'bar.in 2 \n')
test.run(arguments = 'log.txt', stdout=test.wrap_stdout("""\
build(["bar.out"], ["bar.in"])
build(["blat.out"], ["blat.in"])
"""))
expect = expect + """\
bar.in -> bar.out
blat.in -> blat.out
"""
test.must_match('log.txt', expect)
test.write('foo.in', 'foo.in 2 \n')
test.run(arguments = ".", stdout=test.wrap_stdout("""\
build(["foo.out"], ["foo.in"])
build(["log.out"], ["log.txt"])
build(["%s"], ["baz.in"])
build(["%s"], ["%s"])
""" % (os.path.join('subdir', 'baz.out'),
os.path.join('subdir', 'out.out'),
os.path.join('subdir', 'out.txt'))))
expect = expect + """\
foo.in -> foo.out
"""
test.must_match('log.txt', expect)
test.pass_test()
| mit |
zaqwes8811/matlab_ext | identification/dals/os_io/for_analyse/FileOperationsLib.py | 1 | 1102 | #-*- coding: utf-8 -*-
import usaio
def printStrToFile(string, fname):
    """Append *string* (or an iterable of line strings) to file *fname*,
    creating the file if it does not exist."""
    # 'with' guarantees the handle is closed even if the write raises
    # (the original leaked the handle on error).
    with open(fname, "at") as out:
        # writelines accepts both a plain str and a sequence of lines,
        # matching the original behavior.
        out.writelines(string)
def wrString(ofile, string):
    """Overwrite file *ofile* with *string*.

    I/O errors are reported on stdout rather than raised, preserving the
    original best-effort contract.
    """
    try:
        with open(ofile, "w") as out:
            out.write(string)
    except IOError:
        print('write error')
def fileToList(fname, inList):
    """Append the lines of file *fname*, with all newlines removed, to the
    caller-supplied list *inList* (mutated in place).

    Blank lines are kept as empty strings. I/O errors are reported on
    stdout rather than raised, matching the original contract.
    """
    try:
        with open(fname, "r") as src:
            # Iterating the file yields each line exactly as readline did,
            # stopping at EOF; stripping '\n' reproduces the original
            # replace('\n', '') semantics.
            for line in src:
                inList.append(line.replace('\n', ''))
    except IOError:
        print('read error')
| apache-2.0 |
rrrrrr8/vnpy | vnpy/trader/app/ctaStrategy/ctaBacktesting.py | 1 | 56364 | # encoding: UTF-8
'''
本文件中包含的是CTA模块的回测引擎,回测引擎的API和CTA引擎一致,
可以使用和实盘相同的代码进行回测。
'''
from __future__ import division
from __future__ import print_function
from datetime import datetime, timedelta
from collections import OrderedDict
from itertools import product
import multiprocessing
import copy
import pymongo
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from vnpy.rpc import RpcClient, RpcServer, RemoteException
# 如果安装了seaborn则设置为白色风格
try:
import seaborn as sns
sns.set_style('whitegrid')
except ImportError:
pass
from vnpy.trader.vtGlobal import globalSetting
from vnpy.trader.vtObject import VtTickData, VtBarData
from vnpy.trader.vtConstant import *
from vnpy.trader.vtGateway import VtOrderData, VtTradeData
from .ctaBase import *
########################################################################
class BacktestingEngine(object):
"""
CTA回测引擎
函数接口和策略引擎保持一样,
从而实现同一套代码从回测到实盘。
"""
TICK_MODE = 'tick'
BAR_MODE = 'bar'
#----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        # Local stop orders
        self.stopOrderCount = 0     # id counter: stopOrderID = STOPORDERPREFIX + str(stopOrderCount)
        # Local stop-order dicts, keyed by stopOrderID -> StopOrder object
        self.stopOrderDict = {}             # cancelled stop orders are NOT removed from this dict
        self.workingStopOrderDict = {}      # cancelled stop orders ARE removed from this dict

        self.engineType = ENGINETYPE_BACKTESTING    # engine type: backtesting

        self.strategy = None        # strategy under test
        self.mode = self.BAR_MODE   # playback mode, bar-level by default

        self.startDate = ''
        self.initDays = 0
        self.endDate = ''

        self.capital = 1000000      # starting capital for the backtest (default 1,000,000)
        self.slippage = 0           # assumed slippage per trade
        self.rate = 0               # assumed commission rate (percentage-based commission)
        self.size = 1               # contract multiplier, default 1
        self.priceTick = 0          # minimum price increment

        self.dbClient = None        # database client
        self.dbCursor = None        # database cursor
        self.hdsClient = None       # history-data-server RPC client

        self.initData = []          # data reserved for strategy initialization
        self.dbName = ''            # backtest database name
        self.symbol = ''            # backtest collection (symbol) name

        self.dataStartDate = None       # data start date, datetime object
        self.dataEndDate = None         # data end date, datetime object
        self.strategyStartDate = None   # strategy start date (earlier data is init-only), datetime object

        self.limitOrderCount = 0                    # limit-order id counter
        self.limitOrderDict = OrderedDict()         # all limit orders
        self.workingLimitOrderDict = OrderedDict()  # working limit orders, used for matching

        self.tradeCount = 0             # trade id counter
        self.tradeDict = OrderedDict()  # all trades

        self.logList = []               # log records

        # Latest market data, used to simulate fills
        self.tick = None
        self.bar = None
        self.dt = None      # latest timestamp

        # Used for daily result calculation
        self.dailyResultDict = OrderedDict()
#------------------------------------------------
# 通用功能
#------------------------------------------------
#----------------------------------------------------------------------
def roundToPriceTick(self, price):
"""取整价格到合约最小价格变动"""
if not self.priceTick:
return price
newPrice = round(price/self.priceTick, 0) * self.priceTick
return newPrice
#----------------------------------------------------------------------
def output(self, content):
"""输出内容"""
print(str(datetime.now()) + "\t" + content)
#------------------------------------------------
# 参数设置相关
#------------------------------------------------
#----------------------------------------------------------------------
def setStartDate(self, startDate='20100416', initDays=10):
"""设置回测的启动日期"""
self.startDate = startDate
self.initDays = initDays
self.dataStartDate = datetime.strptime(startDate, '%Y%m%d')
initTimeDelta = timedelta(initDays)
self.strategyStartDate = self.dataStartDate + initTimeDelta
#----------------------------------------------------------------------
def setEndDate(self, endDate=''):
"""设置回测的结束日期"""
self.endDate = endDate
if endDate:
self.dataEndDate = datetime.strptime(endDate, '%Y%m%d')
# 若不修改时间则会导致不包含dataEndDate当天数据
self.dataEndDate = self.dataEndDate.replace(hour=23, minute=59)
#----------------------------------------------------------------------
    def setBacktestingMode(self, mode):
        """Set the playback mode: self.BAR_MODE or self.TICK_MODE."""
        self.mode = mode
#----------------------------------------------------------------------
    def setDatabase(self, dbName, symbol):
        """Set the MongoDB database and collection (symbol) holding the
        history data."""
        self.dbName = dbName
        self.symbol = symbol
#----------------------------------------------------------------------
    def setCapital(self, capital):
        """Set the starting capital for the backtest."""
        self.capital = capital
#----------------------------------------------------------------------
    def setSlippage(self, slippage):
        """Set the assumed slippage, in price points per trade."""
        self.slippage = slippage
#----------------------------------------------------------------------
    def setSize(self, size):
        """Set the contract multiplier (units per lot)."""
        self.size = size
#----------------------------------------------------------------------
    def setRate(self, rate):
        """Set the commission rate (fraction of turnover)."""
        self.rate = rate
#----------------------------------------------------------------------
    def setPriceTick(self, priceTick):
        """Set the minimum price increment; 0 disables price rounding."""
        self.priceTick = priceTick
#------------------------------------------------
# 数据回放相关
#------------------------------------------------
#----------------------------------------------------------------------
    def initHdsClient(self):
        """Initialize the RPC client for the history data server.

        NOTE(review): addresses are hard-coded to localhost:5555/7777 —
        presumably the default HDS server ports; confirm before deploying.
        """
        reqAddress = 'tcp://localhost:5555'
        subAddress = 'tcp://localhost:7777'

        self.hdsClient = RpcClient(reqAddress, subAddress)
        self.hdsClient.start()
#----------------------------------------------------------------------
    def loadHistoryData(self):
        """Load history data, either from MongoDB directly or, when
        initHdsClient() has been called, from the history data server.

        Fills self.initData (records before strategyStartDate, used only
        for strategy initialization) and self.dbCursor (playback data).
        """
        self.dbClient = pymongo.MongoClient(globalSetting['mongoHost'], globalSetting['mongoPort'])
        collection = self.dbClient[self.dbName][self.symbol]

        self.output(u'开始载入数据')

        # Pick the record class according to the playback mode.
        # NOTE(review): func is assigned for symmetry with runBacktesting
        # but is not used inside this method.
        if self.mode == self.BAR_MODE:
            dataClass = VtBarData
            func = self.newBar
        else:
            dataClass = VtTickData
            func = self.newTick

        # Load the data used for strategy initialization
        if self.hdsClient:
            initCursor = self.hdsClient.loadHistoryData(self.dbName,
                                                        self.symbol,
                                                        self.dataStartDate,
                                                        self.strategyStartDate)
        else:
            flt = {'datetime':{'$gte':self.dataStartDate,
                               '$lt':self.strategyStartDate}}
            initCursor = collection.find(flt).sort('datetime')

        # Read the records out of the cursor into a list
        self.initData = []              # clear initData first
        for d in initCursor:
            data = dataClass()
            data.__dict__ = d
            self.initData.append(data)

        # Load the playback (backtest) data
        if self.hdsClient:
            self.dbCursor = self.hdsClient.loadHistoryData(self.dbName,
                                                           self.symbol,
                                                           self.strategyStartDate,
                                                           self.dataEndDate)
        else:
            if not self.dataEndDate:
                flt = {'datetime':{'$gte':self.strategyStartDate}}   # data filter
            else:
                flt = {'datetime':{'$gte':self.strategyStartDate,
                                   '$lte':self.dataEndDate}}
            self.dbCursor = collection.find(flt).sort('datetime')

        # The HDS client returns plain lists, pymongo returns cursors.
        if isinstance(self.dbCursor, list):
            count = len(initCursor) + len(self.dbCursor)
        else:
            count = initCursor.count() + self.dbCursor.count()
        self.output(u'载入完成,数据量:%s' %count)
#----------------------------------------------------------------------
    def runBacktesting(self):
        """Run the backtest: load data, init and start the strategy, then
        replay every record through the matching engine."""
        # Load history data
        self.loadHistoryData()

        # Pick the record class and push handler for the playback mode
        if self.mode == self.BAR_MODE:
            dataClass = VtBarData
            func = self.newBar
        else:
            dataClass = VtTickData
            func = self.newTick

        self.output(u'开始回测')

        self.strategy.onInit()
        self.strategy.inited = True
        self.output(u'策略初始化完成')

        self.strategy.trading = True
        self.strategy.onStart()
        self.output(u'策略启动完成')

        self.output(u'开始回放数据')

        # Rehydrate each raw document into a data object and push it.
        for d in self.dbCursor:
            data = dataClass()
            data.__dict__ = d
            func(data)

        self.output(u'数据回放结束')
#----------------------------------------------------------------------
    def newBar(self, bar):
        """Process a new bar.

        Order matters: working orders are matched against this bar BEFORE
        the strategy sees it, so orders sent in onBar can only fill on the
        next bar — mimicking live behavior.
        """
        self.bar = bar
        self.dt = bar.datetime

        self.crossLimitOrder()      # match limit orders first
        self.crossStopOrder()       # then match stop orders
        self.strategy.onBar(bar)    # push the bar to the strategy

        self.updateDailyClose(bar.datetime, bar.close)
#----------------------------------------------------------------------
    def newTick(self, tick):
        """Process a new tick: match working orders against it first, then
        push it to the strategy (same ordering rationale as newBar)."""
        self.tick = tick
        self.dt = tick.datetime

        self.crossLimitOrder()
        self.crossStopOrder()
        self.strategy.onTick(tick)

        self.updateDailyClose(tick.datetime, tick.lastPrice)
#----------------------------------------------------------------------
    def initStrategy(self, strategyClass, setting=None):
        """
        Instantiate the strategy under test.

        setting is the strategy's parameter dict; omit it to use the
        defaults declared on the strategy class.
        """
        self.strategy = strategyClass(self, setting)
        self.strategy.name = self.strategy.className
#----------------------------------------------------------------------
def crossLimitOrder(self):
"""基于最新数据撮合限价单"""
# 先确定会撮合成交的价格
if self.mode == self.BAR_MODE:
buyCrossPrice = self.bar.low # 若买入方向限价单价格高于该价格,则会成交
sellCrossPrice = self.bar.high # 若卖出方向限价单价格低于该价格,则会成交
buyBestCrossPrice = self.bar.open # 在当前时间点前发出的买入委托可能的最优成交价
sellBestCrossPrice = self.bar.open # 在当前时间点前发出的卖出委托可能的最优成交价
else:
buyCrossPrice = self.tick.askPrice1
sellCrossPrice = self.tick.bidPrice1
buyBestCrossPrice = self.tick.askPrice1
sellBestCrossPrice = self.tick.bidPrice1
# 遍历限价单字典中的所有限价单
for orderID, order in self.workingLimitOrderDict.items():
# 推送委托进入队列(未成交)的状态更新
if not order.status:
order.status = STATUS_NOTTRADED
self.strategy.onOrder(order)
# 判断是否会成交
buyCross = (order.direction==DIRECTION_LONG and
order.price>=buyCrossPrice and
buyCrossPrice > 0) # 国内的tick行情在涨停时askPrice1为0,此时买无法成交
sellCross = (order.direction==DIRECTION_SHORT and
order.price<=sellCrossPrice and
sellCrossPrice > 0) # 国内的tick行情在跌停时bidPrice1为0,此时卖无法成交
# 如果发生了成交
if buyCross or sellCross:
# 推送成交数据
self.tradeCount += 1 # 成交编号自增1
tradeID = str(self.tradeCount)
trade = VtTradeData()
trade.vtSymbol = order.vtSymbol
trade.tradeID = tradeID
trade.vtTradeID = tradeID
trade.orderID = order.orderID
trade.vtOrderID = order.orderID
trade.direction = order.direction
trade.offset = order.offset
# 以买入为例:
# 1. 假设当根K线的OHLC分别为:100, 125, 90, 110
# 2. 假设在上一根K线结束(也是当前K线开始)的时刻,策略发出的委托为限价105
# 3. 则在实际中的成交价会是100而不是105,因为委托发出时市场的最优价格是100
if buyCross:
trade.price = min(order.price, buyBestCrossPrice)
self.strategy.pos += order.totalVolume
else:
trade.price = max(order.price, sellBestCrossPrice)
self.strategy.pos -= order.totalVolume
trade.volume = order.totalVolume
trade.tradeTime = self.dt.strftime('%H:%M:%S')
trade.dt = self.dt
self.strategy.onTrade(trade)
self.tradeDict[tradeID] = trade
# 推送委托数据
order.tradedVolume = order.totalVolume
order.status = STATUS_ALLTRADED
self.strategy.onOrder(order)
# 从字典中删除该限价单
if orderID in self.workingLimitOrderDict:
del self.workingLimitOrderDict[orderID]
#----------------------------------------------------------------------
def crossStopOrder(self):
"""基于最新数据撮合停止单"""
# 先确定会撮合成交的价格,这里和限价单规则相反
if self.mode == self.BAR_MODE:
buyCrossPrice = self.bar.high # 若买入方向停止单价格低于该价格,则会成交
sellCrossPrice = self.bar.low # 若卖出方向限价单价格高于该价格,则会成交
bestCrossPrice = self.bar.open # 最优成交价,买入停止单不能低于,卖出停止单不能高于
else:
buyCrossPrice = self.tick.lastPrice
sellCrossPrice = self.tick.lastPrice
bestCrossPrice = self.tick.lastPrice
# 遍历停止单字典中的所有停止单
for stopOrderID, so in self.workingStopOrderDict.items():
# 判断是否会成交
buyCross = so.direction==DIRECTION_LONG and so.price<=buyCrossPrice
sellCross = so.direction==DIRECTION_SHORT and so.price>=sellCrossPrice
# 如果发生了成交
if buyCross or sellCross:
# 更新停止单状态,并从字典中删除该停止单
so.status = STOPORDER_TRIGGERED
if stopOrderID in self.workingStopOrderDict:
del self.workingStopOrderDict[stopOrderID]
# 推送成交数据
self.tradeCount += 1 # 成交编号自增1
tradeID = str(self.tradeCount)
trade = VtTradeData()
trade.vtSymbol = so.vtSymbol
trade.tradeID = tradeID
trade.vtTradeID = tradeID
if buyCross:
self.strategy.pos += so.volume
trade.price = max(bestCrossPrice, so.price)
else:
self.strategy.pos -= so.volume
trade.price = min(bestCrossPrice, so.price)
self.limitOrderCount += 1
orderID = str(self.limitOrderCount)
trade.orderID = orderID
trade.vtOrderID = orderID
trade.direction = so.direction
trade.offset = so.offset
trade.volume = so.volume
trade.tradeTime = self.dt.strftime('%H:%M:%S')
trade.dt = self.dt
self.tradeDict[tradeID] = trade
# 推送委托数据
order = VtOrderData()
order.vtSymbol = so.vtSymbol
order.symbol = so.vtSymbol
order.orderID = orderID
order.vtOrderID = orderID
order.direction = so.direction
order.offset = so.offset
order.price = so.price
order.totalVolume = so.volume
order.tradedVolume = so.volume
order.status = STATUS_ALLTRADED
order.orderTime = trade.tradeTime
self.limitOrderDict[orderID] = order
# 按照顺序推送数据
self.strategy.onStopOrder(so)
self.strategy.onOrder(order)
self.strategy.onTrade(trade)
#------------------------------------------------
# 策略接口相关
#------------------------------------------------
#----------------------------------------------------------------------
    def sendOrder(self, vtSymbol, orderType, price, volume, strategy):
        """Create a simulated limit order, register it as working, and
        return its id wrapped in a list (mirrors the live engine API)."""
        self.limitOrderCount += 1
        orderID = str(self.limitOrderCount)

        order = VtOrderData()
        order.vtSymbol = vtSymbol
        order.price = self.roundToPriceTick(price)
        order.totalVolume = volume
        order.orderID = orderID
        order.vtOrderID = orderID
        order.orderTime = self.dt.strftime('%H:%M:%S')

        # Map CTA order types onto direction/offset pairs
        if orderType == CTAORDER_BUY:
            order.direction = DIRECTION_LONG
            order.offset = OFFSET_OPEN
        elif orderType == CTAORDER_SELL:
            order.direction = DIRECTION_SHORT
            order.offset = OFFSET_CLOSE
        elif orderType == CTAORDER_SHORT:
            order.direction = DIRECTION_SHORT
            order.offset = OFFSET_OPEN
        elif orderType == CTAORDER_COVER:
            order.direction = DIRECTION_LONG
            order.offset = OFFSET_CLOSE

        # Save it to the limit-order dicts
        self.workingLimitOrderDict[orderID] = order
        self.limitOrderDict[orderID] = order

        return [orderID]
#----------------------------------------------------------------------
    def cancelOrder(self, vtOrderID):
        """Cancel a working limit order by id (silently no-op if the order
        is not working)."""
        if vtOrderID in self.workingLimitOrderDict:
            order = self.workingLimitOrderDict[vtOrderID]

            order.status = STATUS_CANCELLED
            order.cancelTime = self.dt.strftime('%H:%M:%S')

            self.strategy.onOrder(order)

            del self.workingLimitOrderDict[vtOrderID]
#----------------------------------------------------------------------
    def sendStopOrder(self, vtSymbol, orderType, price, volume, strategy):
        """Create a locally-simulated stop order and return its id wrapped
        in a list (stop orders are engine-local, not exchange-native)."""
        self.stopOrderCount += 1
        stopOrderID = STOPORDERPREFIX + str(self.stopOrderCount)

        so = StopOrder()
        so.vtSymbol = vtSymbol
        so.price = self.roundToPriceTick(price)
        so.volume = volume
        so.strategy = strategy
        so.status = STOPORDER_WAITING
        so.stopOrderID = stopOrderID

        # Map CTA order types onto direction/offset pairs
        if orderType == CTAORDER_BUY:
            so.direction = DIRECTION_LONG
            so.offset = OFFSET_OPEN
        elif orderType == CTAORDER_SELL:
            so.direction = DIRECTION_SHORT
            so.offset = OFFSET_CLOSE
        elif orderType == CTAORDER_SHORT:
            so.direction = DIRECTION_SHORT
            so.offset = OFFSET_OPEN
        elif orderType == CTAORDER_COVER:
            so.direction = DIRECTION_LONG
            so.offset = OFFSET_CLOSE

        # Save the StopOrder object in both dicts
        self.stopOrderDict[stopOrderID] = so
        self.workingStopOrderDict[stopOrderID] = so

        # Push the initial stop-order update
        self.strategy.onStopOrder(so)

        return [stopOrderID]
#----------------------------------------------------------------------
    def cancelStopOrder(self, stopOrderID):
        """Cancel a working stop order by id (silently no-op if the stop
        order is not working)."""
        # Check that the stop order still exists in the working dict
        if stopOrderID in self.workingStopOrderDict:
            so = self.workingStopOrderDict[stopOrderID]
            so.status = STOPORDER_CANCELLED
            del self.workingStopOrderDict[stopOrderID]
            self.strategy.onStopOrder(so)
#----------------------------------------------------------------------
    def putStrategyEvent(self, name):
        """Push a strategy-update event; intentionally a no-op in backtesting."""
        pass
#----------------------------------------------------------------------
    def insertData(self, dbName, collectionName, data):
        """No-op: database writes are disallowed during backtesting; this
        stub keeps live-trading strategy code from raising here."""
        pass
#----------------------------------------------------------------------
    def loadBar(self, dbName, collectionName, startDate):
        """Return the preloaded initialization bars directly (arguments are
        ignored in backtesting; kept for live-engine API compatibility)."""
        return self.initData
#----------------------------------------------------------------------
    def loadTick(self, dbName, collectionName, startDate):
        """Return the preloaded initialization ticks directly (arguments are
        ignored in backtesting; kept for live-engine API compatibility)."""
        return self.initData
#----------------------------------------------------------------------
def writeCtaLog(self, content):
"""记录日志"""
log = str(self.dt) + ' ' + content
self.logList.append(log)
#----------------------------------------------------------------------
def cancelAll(self, name):
"""全部撤单"""
# 撤销限价单
for orderID in self.workingLimitOrderDict.keys():
self.cancelOrder(orderID)
# 撤销停止单
for stopOrderID in self.workingStopOrderDict.keys():
self.cancelStopOrder(stopOrderID)
#----------------------------------------------------------------------
    def saveSyncData(self, strategy):
        """No-op: strategy position syncing is meaningless in backtesting."""
        pass
#----------------------------------------------------------------------
    def getPriceTick(self, strategy):
        """Return the configured minimum price increment (the *strategy*
        argument is ignored; kept for live-engine API compatibility)."""
        return self.priceTick
#------------------------------------------------
# 结果计算相关
#------------------------------------------------
#----------------------------------------------------------------------
    def calculateBacktestingResult(self):
        """
        Compute the backtest result on a per-round-trip basis.

        Pairs entry and exit fills FIFO into TradingResult objects,
        force-closes any position still open at the end of the data at the
        last price, then aggregates pnl, drawdown, win rate and related
        statistics into a dict. Returns {} when there were no trades.
        """
        self.output(u'计算回测结果')

        # Check that there are trades at all
        if not self.tradeDict:
            self.output(u'成交记录为空,无法计算回测结果')
            return {}

        # First, compute per-round-trip pnl from the recorded fills
        resultList = []             # list of round-trip results
        longTrade = []              # long entries not yet closed
        shortTrade = []             # short entries not yet closed
        tradeTimeList = []          # timestamp of every paired fill
        posList = [0]               # position after every paired fill

        for trade in self.tradeDict.values():
            # Copy the trade object: the pairing below mutates trade.volume,
            # and without the copy every stored trade would end up at 0
            trade = copy.copy(trade)

            # Long fill
            if trade.direction == DIRECTION_LONG:
                # No open short entries: this is a fresh long entry
                if not shortTrade:
                    longTrade.append(trade)
                # Otherwise this long closes short entries
                else:
                    while True:
                        entryTrade = shortTrade[0]
                        exitTrade = trade

                        # Settle this entry/exit pair
                        closedVolume = min(exitTrade.volume, entryTrade.volume)
                        result = TradingResult(entryTrade.price, entryTrade.dt,
                                               exitTrade.price, exitTrade.dt,
                                               -closedVolume, self.rate, self.slippage, self.size)
                        resultList.append(result)

                        posList.extend([-1,0])
                        tradeTimeList.extend([result.entryDt, result.exitDt])

                        # Track the unsettled remainders
                        entryTrade.volume -= closedVolume
                        exitTrade.volume -= closedVolume

                        # Entry fully settled: drop it from the open list
                        if not entryTrade.volume:
                            shortTrade.pop(0)

                        # Exit fully settled: leave the loop
                        if not exitTrade.volume:
                            break

                        # Exit not fully settled yet,
                        if exitTrade.volume:
                            # and no open entries remain: the leftover exit
                            # volume becomes a fresh entry in the opposite
                            # direction
                            if not shortTrade:
                                longTrade.append(exitTrade)
                                break
                            # entries remain: keep settling
                            else:
                                pass

            # Short fill
            else:
                # No open long entries: this is a fresh short entry
                if not longTrade:
                    shortTrade.append(trade)
                # Otherwise this short closes long entries
                else:
                    while True:
                        entryTrade = longTrade[0]
                        exitTrade = trade

                        # Settle this entry/exit pair
                        closedVolume = min(exitTrade.volume, entryTrade.volume)
                        result = TradingResult(entryTrade.price, entryTrade.dt,
                                               exitTrade.price, exitTrade.dt,
                                               closedVolume, self.rate, self.slippage, self.size)
                        resultList.append(result)

                        posList.extend([1,0])
                        tradeTimeList.extend([result.entryDt, result.exitDt])

                        # Track the unsettled remainders
                        entryTrade.volume -= closedVolume
                        exitTrade.volume -= closedVolume

                        # Entry fully settled: drop it from the open list
                        if not entryTrade.volume:
                            longTrade.pop(0)

                        # Exit fully settled: leave the loop
                        if not exitTrade.volume:
                            break

                        # Exit not fully settled yet,
                        if exitTrade.volume:
                            # and no open entries remain: the leftover exit
                            # volume becomes a fresh entry in the opposite
                            # direction
                            if not longTrade:
                                shortTrade.append(exitTrade)
                                break
                            # entries remain: keep settling
                            else:
                                pass

        # Force-close whatever is still open at the end of data at the last price
        if self.mode == self.BAR_MODE:
            endPrice = self.bar.close
        else:
            endPrice = self.tick.lastPrice

        for trade in longTrade:
            result = TradingResult(trade.price, trade.dt, endPrice, self.dt,
                                   trade.volume, self.rate, self.slippage, self.size)
            resultList.append(result)

        for trade in shortTrade:
            result = TradingResult(trade.price, trade.dt, endPrice, self.dt,
                                   -trade.volume, self.rate, self.slippage, self.size)
            resultList.append(result)

        # Check that at least one round trip exists
        if not resultList:
            self.output(u'无交易结果')
            return {}

        # Then derive the equity curve, max drawdown, etc. from the round trips
        capital = 0             # running pnl
        maxCapital = 0          # running pnl high-water mark
        drawdown = 0            # drawdown from the high-water mark

        totalResult = 0         # number of round trips
        totalTurnover = 0       # total turnover (contract value)
        totalCommission = 0     # total commission
        totalSlippage = 0       # total slippage

        timeList = []           # round-trip exit times
        pnlList = []            # per-round-trip pnl
        capitalList = []        # cumulative pnl series
        drawdownList = []       # drawdown series

        winningResult = 0       # number of winning round trips
        losingResult = 0        # number of losing round trips
        totalWinning = 0        # gross profit
        totalLosing = 0         # gross loss

        for result in resultList:
            capital += result.pnl
            maxCapital = max(capital, maxCapital)
            drawdown = capital - maxCapital

            pnlList.append(result.pnl)
            timeList.append(result.exitDt)      # a round trip is stamped with its exit time
            capitalList.append(capital)
            drawdownList.append(drawdown)

            totalResult += 1
            totalTurnover += result.turnover
            totalCommission += result.commission
            totalSlippage += result.slippage

            if result.pnl >= 0:
                winningResult += 1
                totalWinning += result.pnl
            else:
                losingResult += 1
                totalLosing += result.pnl

        # Derived win/loss statistics
        winningRate = winningResult/totalResult*100         # win rate (%)

        averageWinning = 0                                  # zero-init so empty cases stay 0
        averageLosing = 0
        profitLossRatio = 0

        if winningResult:
            averageWinning = totalWinning/winningResult     # average profit per winner
        if losingResult:
            averageLosing = totalLosing/losingResult        # average loss per loser
        if averageLosing:
            profitLossRatio = -averageWinning/averageLosing # profit/loss ratio

        # Assemble and return the result dict
        d = {}
        d['capital'] = capital
        d['maxCapital'] = maxCapital
        d['drawdown'] = drawdown
        d['totalResult'] = totalResult
        d['totalTurnover'] = totalTurnover
        d['totalCommission'] = totalCommission
        d['totalSlippage'] = totalSlippage
        d['timeList'] = timeList
        d['pnlList'] = pnlList
        d['capitalList'] = capitalList
        d['drawdownList'] = drawdownList
        d['winningRate'] = winningRate
        d['averageWinning'] = averageWinning
        d['averageLosing'] = averageLosing
        d['profitLossRatio'] = profitLossRatio
        d['posList'] = posList
        d['tradeTimeList'] = tradeTimeList
        d['resultList'] = resultList
        return d
#----------------------------------------------------------------------
    def showBacktestingResult(self):
        """Print per-trade backtest statistics and draw the four summary charts.

        Relies on calculateBacktestingResult() for the aggregated numbers;
        raises IndexError if that call returned an empty dict (no trades).
        """
        d = self.calculateBacktestingResult()
        # Textual summary of the per-trade statistics.
        self.output('-' * 30)
        self.output(u'第一笔交易:\t%s' % d['timeList'][0])
        self.output(u'最后一笔交易:\t%s' % d['timeList'][-1])
        self.output(u'总交易次数:\t%s' % formatNumber(d['totalResult']))
        self.output(u'总盈亏:\t%s' % formatNumber(d['capital']))
        self.output(u'最大回撤: \t%s' % formatNumber(min(d['drawdownList'])))
        self.output(u'平均每笔盈利:\t%s' %formatNumber(d['capital']/d['totalResult']))
        self.output(u'平均每笔滑点:\t%s' %formatNumber(d['totalSlippage']/d['totalResult']))
        self.output(u'平均每笔佣金:\t%s' %formatNumber(d['totalCommission']/d['totalResult']))
        self.output(u'胜率\t\t%s%%' %formatNumber(d['winningRate']))
        self.output(u'盈利交易平均值\t%s' %formatNumber(d['averageWinning']))
        self.output(u'亏损交易平均值\t%s' %formatNumber(d['averageLosing']))
        self.output(u'盈亏比:\t%s' %formatNumber(d['profitLossRatio']))
        # Plot equity curve, drawdown, PnL histogram and position over time.
        fig = plt.figure(figsize=(10, 16))
        pCapital = plt.subplot(4, 1, 1)
        pCapital.set_ylabel("capital")
        pCapital.plot(d['capitalList'], color='r', lw=0.8)
        pDD = plt.subplot(4, 1, 2)
        pDD.set_ylabel("DD")
        pDD.bar(range(len(d['drawdownList'])), d['drawdownList'], color='g')
        pPnl = plt.subplot(4, 1, 3)
        pPnl.set_ylabel("pnl")
        pPnl.hist(d['pnlList'], bins=50, color='c')
        pPos = plt.subplot(4, 1, 4)
        pPos.set_ylabel("Position")
        # Drop a trailing flat (zero) position so the step plot ends on a trade.
        if d['posList'][-1] == 0:
            del d['posList'][-1]
        tradeTimeIndex = [item.strftime("%m/%d %H:%M:%S") for item in d['tradeTimeList']]
        # NOTE(review): np.int was removed in NumPy >= 1.24; also the integer
        # division here assumes Python 2 semantics — verify before porting.
        xindex = np.arange(0, len(tradeTimeIndex), np.int(len(tradeTimeIndex)/10))
        # NOTE(review): under Python 3, map() is lazy; plt.xticks below would
        # need list(...) — this code assumes Python 2 (map returns a list).
        tradeTimeIndex = map(lambda i: tradeTimeIndex[i], xindex)
        pPos.plot(d['posList'], color='k', drawstyle='steps-pre')
        pPos.set_ylim(-1.2, 1.2)
        plt.sca(pPos)
        plt.tight_layout()
        plt.xticks(xindex, tradeTimeIndex, rotation=30)  # rotate x tick labels
        plt.show()
#----------------------------------------------------------------------
def clearBacktestingResult(self):
"""清空之前回测的结果"""
# 清空限价单相关
self.limitOrderCount = 0
self.limitOrderDict.clear()
self.workingLimitOrderDict.clear()
# 清空停止单相关
self.stopOrderCount = 0
self.stopOrderDict.clear()
self.workingStopOrderDict.clear()
# 清空成交相关
self.tradeCount = 0
self.tradeDict.clear()
#----------------------------------------------------------------------
def runOptimization(self, strategyClass, optimizationSetting):
"""优化参数"""
# 获取优化设置
settingList = optimizationSetting.generateSetting()
targetName = optimizationSetting.optimizeTarget
# 检查参数设置问题
if not settingList or not targetName:
self.output(u'优化设置有问题,请检查')
# 遍历优化
resultList = []
for setting in settingList:
self.clearBacktestingResult()
self.output('-' * 30)
self.output('setting: %s' %str(setting))
self.initStrategy(strategyClass, setting)
self.runBacktesting()
df = self.calculateDailyResult()
df, d = self.calculateDailyStatistics(df)
try:
targetValue = d[targetName]
except KeyError:
targetValue = 0
resultList.append(([str(setting)], targetValue, d))
# 显示结果
resultList.sort(reverse=True, key=lambda result:result[1])
self.output('-' * 30)
self.output(u'优化结果:')
for result in resultList:
self.output(u'参数:%s,目标:%s' %(result[0], result[1]))
return resultList
#----------------------------------------------------------------------
def runParallelOptimization(self, strategyClass, optimizationSetting):
"""并行优化参数"""
# 获取优化设置
settingList = optimizationSetting.generateSetting()
targetName = optimizationSetting.optimizeTarget
# 检查参数设置问题
if not settingList or not targetName:
self.output(u'优化设置有问题,请检查')
# 多进程优化,启动一个对应CPU核心数量的进程池
pool = multiprocessing.Pool(multiprocessing.cpu_count())
l = []
for setting in settingList:
l.append(pool.apply_async(optimize, (strategyClass, setting,
targetName, self.mode,
self.startDate, self.initDays, self.endDate,
self.slippage, self.rate, self.size, self.priceTick,
self.dbName, self.symbol)))
pool.close()
pool.join()
# 显示结果
resultList = [res.get() for res in l]
resultList.sort(reverse=True, key=lambda result:result[1])
self.output('-' * 30)
self.output(u'优化结果:')
for result in resultList:
self.output(u'参数:%s,目标:%s' %(result[0], result[1]))
return resultList
#----------------------------------------------------------------------
def updateDailyClose(self, dt, price):
"""更新每日收盘价"""
date = dt.date()
if date not in self.dailyResultDict:
self.dailyResultDict[date] = DailyResult(date, price)
else:
self.dailyResultDict[date].closePrice = price
#----------------------------------------------------------------------
    def calculateDailyResult(self):
        """Aggregate recorded trades into per-day results.

        Returns a pandas DataFrame indexed by date with one column per
        DailyResult attribute, or an empty dict when no trades exist.
        NOTE(review): the empty-trade path returns {} while the normal path
        returns a DataFrame — callers must handle both types.
        """
        self.output(u'计算按日统计结果')
        # Nothing traded: bail out early.
        if not self.tradeDict:
            self.output(u'成交记录为空,无法计算回测结果')
            return {}
        # Attach each trade to its day's DailyResult.
        # NOTE(review): assumes updateDailyClose() already created an entry
        # for every trade date — a trade on an unseen date raises KeyError.
        for trade in self.tradeDict.values():
            date = trade.dt.date()
            dailyResult = self.dailyResultDict[date]
            dailyResult.addTrade(trade)
        # Walk days in insertion order, carrying close price and position
        # forward from one day to the next.
        previousClose = 0
        openPosition = 0
        for dailyResult in self.dailyResultDict.values():
            dailyResult.previousClose = previousClose
            previousClose = dailyResult.closePrice
            dailyResult.calculatePnl(openPosition, self.size, self.rate, self.slippage )
            openPosition = dailyResult.closePosition
        # Build a column-per-attribute dict for DataFrame construction.
        # NOTE(review): uses the loop variable `dailyResult` after the loop —
        # raises NameError if dailyResultDict is empty (no daily closes seen).
        resultDict = {k:[] for k in dailyResult.__dict__.keys()}
        for dailyResult in self.dailyResultDict.values():
            for k, v in dailyResult.__dict__.items():
                resultDict[k].append(v)
        resultDf = pd.DataFrame.from_dict(resultDict)
        # Index by calendar date for downstream statistics.
        resultDf = resultDf.set_index('date')
        return resultDf
#----------------------------------------------------------------------
    def calculateDailyStatistics(self, df):
        """Derive equity-curve columns and summary statistics from daily results.

        df: DataFrame from calculateDailyResult(), indexed by date.
        Returns (df, result): df gains balance/return/highlevel/drawdown/
        ddPercent columns; result is a dict of summary statistics.
        NOTE(review): assumes df is non-empty — divisions by totalDays and
        df.index[0] would fail otherwise.
        """
        # Equity curve, log returns and running-high drawdown series.
        df['balance'] = df['netPnl'].cumsum() + self.capital
        df['return'] = (np.log(df['balance']) - np.log(df['balance'].shift(1))).fillna(0)
        df['highlevel'] = df['balance'].rolling(min_periods=1,window=len(df),center=False).max()
        df['drawdown'] = df['balance'] - df['highlevel']
        df['ddPercent'] = df['drawdown'] / df['highlevel'] * 100
        # Scalar summary statistics.
        startDate = df.index[0]
        endDate = df.index[-1]
        totalDays = len(df)
        profitDays = len(df[df['netPnl']>0])
        lossDays = len(df[df['netPnl']<0])
        endBalance = df['balance'].iloc[-1]
        maxDrawdown = df['drawdown'].min()
        maxDdPercent = df['ddPercent'].min()
        totalNetPnl = df['netPnl'].sum()
        dailyNetPnl = totalNetPnl / totalDays
        totalCommission = df['commission'].sum()
        dailyCommission = totalCommission / totalDays
        totalSlippage = df['slippage'].sum()
        dailySlippage = totalSlippage / totalDays
        totalTurnover = df['turnover'].sum()
        dailyTurnover = totalTurnover / totalDays
        totalTradeCount = df['tradeCount'].sum()
        dailyTradeCount = totalTradeCount / totalDays
        totalReturn = (endBalance/self.capital - 1) * 100
        # Annualized using a 240-day year — presumably mainland China trading
        # calendar; confirm before applying to other markets.
        annualizedReturn = totalReturn / totalDays * 240
        dailyReturn = df['return'].mean() * 100
        returnStd = df['return'].std() * 100
        # Guard against a zero standard deviation (flat equity curve).
        if returnStd:
            sharpeRatio = dailyReturn / returnStd * np.sqrt(240)
        else:
            sharpeRatio = 0
        # Package everything for the caller / reporting layer.
        result = {
            'startDate': startDate,
            'endDate': endDate,
            'totalDays': totalDays,
            'profitDays': profitDays,
            'lossDays': lossDays,
            'endBalance': endBalance,
            'maxDrawdown': maxDrawdown,
            'maxDdPercent': maxDdPercent,
            'totalNetPnl': totalNetPnl,
            'dailyNetPnl': dailyNetPnl,
            'totalCommission': totalCommission,
            'dailyCommission': dailyCommission,
            'totalSlippage': totalSlippage,
            'dailySlippage': dailySlippage,
            'totalTurnover': totalTurnover,
            'dailyTurnover': dailyTurnover,
            'totalTradeCount': totalTradeCount,
            'dailyTradeCount': dailyTradeCount,
            'totalReturn': totalReturn,
            'annualizedReturn': annualizedReturn,
            'dailyReturn': dailyReturn,
            'returnStd': returnStd,
            'sharpeRatio': sharpeRatio
        }
        return df, result
#----------------------------------------------------------------------
    def showDailyResult(self, df=None, result=None):
        """Print the daily-statistics report and draw the four summary charts.

        df/result: optionally precomputed outputs of calculateDailyResult()
        and calculateDailyStatistics(); both are computed here when df is None.
        NOTE(review): if df is passed but result is None, the report below
        raises — callers should supply both or neither.
        """
        if df is None:
            df = self.calculateDailyResult()
            df, result = self.calculateDailyStatistics(df)
        # Textual summary.
        self.output('-' * 30)
        self.output(u'首个交易日:\t%s' % result['startDate'])
        self.output(u'最后交易日:\t%s' % result['endDate'])
        self.output(u'总交易日:\t%s' % result['totalDays'])
        self.output(u'盈利交易日\t%s' % result['profitDays'])
        self.output(u'亏损交易日:\t%s' % result['lossDays'])
        self.output(u'起始资金:\t%s' % self.capital)
        self.output(u'结束资金:\t%s' % formatNumber(result['endBalance']))
        self.output(u'总收益率:\t%s%%' % formatNumber(result['totalReturn']))
        self.output(u'年化收益:\t%s%%' % formatNumber(result['annualizedReturn']))
        self.output(u'总盈亏:\t%s' % formatNumber(result['totalNetPnl']))
        self.output(u'最大回撤: \t%s' % formatNumber(result['maxDrawdown']))
        self.output(u'百分比最大回撤: %s%%' % formatNumber(result['maxDdPercent']))
        self.output(u'总手续费:\t%s' % formatNumber(result['totalCommission']))
        self.output(u'总滑点:\t%s' % formatNumber(result['totalSlippage']))
        self.output(u'总成交金额:\t%s' % formatNumber(result['totalTurnover']))
        self.output(u'总成交笔数:\t%s' % formatNumber(result['totalTradeCount']))
        self.output(u'日均盈亏:\t%s' % formatNumber(result['dailyNetPnl']))
        self.output(u'日均手续费:\t%s' % formatNumber(result['dailyCommission']))
        self.output(u'日均滑点:\t%s' % formatNumber(result['dailySlippage']))
        self.output(u'日均成交金额:\t%s' % formatNumber(result['dailyTurnover']))
        self.output(u'日均成交笔数:\t%s' % formatNumber(result['dailyTradeCount']))
        self.output(u'日均收益率:\t%s%%' % formatNumber(result['dailyReturn']))
        self.output(u'收益标准差:\t%s%%' % formatNumber(result['returnStd']))
        self.output(u'Sharpe Ratio:\t%s' % formatNumber(result['sharpeRatio']))
        # Plot balance, drawdown, daily PnL and its distribution.
        fig = plt.figure(figsize=(10, 16))
        pBalance = plt.subplot(4, 1, 1)
        pBalance.set_title('Balance')
        df['balance'].plot(legend=True)
        pDrawdown = plt.subplot(4, 1, 2)
        pDrawdown.set_title('Drawdown')
        pDrawdown.fill_between(range(len(df)), df['drawdown'].values)
        pPnl = plt.subplot(4, 1, 3)
        pPnl.set_title('Daily Pnl')
        df['netPnl'].plot(kind='bar', legend=False, grid=False, xticks=[])
        pKDE = plt.subplot(4, 1, 4)
        pKDE.set_title('Daily Pnl Distribution')
        df['netPnl'].hist(bins=50)
        plt.show()
########################################################################
class TradingResult(object):
    """Result of a single round-trip trade (one entry plus one exit).

    All costs are computed up front so the object is an immutable record:
    turnover, commission, slippage cost and net pnl.
    """

    def __init__(self, entryPrice, entryDt, exitPrice,
                 exitDt, volume, rate, slippage, size):
        """entryPrice/exitPrice: fill prices; entryDt/exitDt: fill times;
        volume: signed quantity (+ long, - short); rate: commission rate;
        slippage: slippage in price points per side; size: contract multiplier.
        """
        self.entryPrice = entryPrice
        self.exitPrice = exitPrice
        self.entryDt = entryDt
        self.exitDt = exitDt
        self.volume = volume
        # Costs are charged on both legs, hence entry+exit and the factor 2.
        lots = abs(volume)
        self.turnover = (entryPrice + exitPrice) * size * lots
        self.commission = self.turnover * rate
        self.slippage = slippage * 2 * size * lots
        grossPnl = (exitPrice - entryPrice) * volume * size
        self.pnl = grossPnl - self.commission - self.slippage
########################################################################
class DailyResult(object):
    """Aggregated trading result for one calendar day.

    Holds the day's fills plus the mark-to-market numbers computed by
    calculatePnl(): position pnl (carried position) + trading pnl (intraday
    fills), net of commission and slippage.
    """

    def __init__(self, date, closePrice):
        """date: calendar date; closePrice: that day's settlement/close."""
        self.date = date
        self.closePrice = closePrice
        self.previousClose = 0      # prior day's close, filled in by the engine
        self.tradeList = []         # fills recorded for this day
        self.tradeCount = 0
        self.openPosition = 0       # position carried in at the open
        self.closePosition = 0      # position carried out at the close
        self.tradingPnl = 0         # pnl from today's fills
        self.positionPnl = 0        # pnl from the carried position
        self.totalPnl = 0
        self.turnover = 0
        self.commission = 0
        self.slippage = 0
        self.netPnl = 0             # totalPnl - commission - slippage

    def addTrade(self, trade):
        """Record one fill for this day."""
        self.tradeList.append(trade)

    def calculatePnl(self, openPosition=0, size=1, rate=0, slippage=0):
        """Compute the day's pnl.

        openPosition: signed position at the open; size: contract multiplier;
        rate: commission rate; slippage: slippage in price points per fill.
        """
        # Mark the carried position against yesterday's close.
        self.openPosition = openPosition
        self.positionPnl = openPosition * (self.closePrice - self.previousClose) * size
        self.closePosition = openPosition
        # Mark each fill against today's close and accumulate costs.
        self.tradeCount = len(self.tradeList)
        for trade in self.tradeList:
            posChange = trade.volume if trade.direction == DIRECTION_LONG else -trade.volume
            self.tradingPnl += posChange * (self.closePrice - trade.price) * size
            self.closePosition += posChange
            tradeValue = trade.price * trade.volume * size
            self.turnover += tradeValue
            self.commission += tradeValue * rate
            self.slippage += trade.volume * size * slippage
        # Net result for the day.
        self.totalPnl = self.tradingPnl + self.positionPnl
        self.netPnl = self.totalPnl - self.commission - self.slippage
########################################################################
class OptimizationSetting(object):
    """Parameter grid and target definition for strategy optimization.

    paramDict maps each parameter name to the list of candidate values;
    generateSetting() expands it into the cartesian product of settings.
    """

    def __init__(self):
        """Constructor"""
        self.paramDict = OrderedDict()  # name -> list of candidate values
        self.optimizeTarget = ''        # statistics key to rank runs by

    def addParameter(self, name, start, end=None, step=None):
        """Register one optimization parameter.

        With only *start* given the parameter is fixed to that single value;
        otherwise candidates are start, start+step, ... up to and including
        *end*. Invalid ranges are reported and ignored.
        """
        # Single fixed value.
        if end is None and step is None:
            self.paramDict[name] = [start]
            return
        # Fix: check for a missing end/step explicitly. Previously
        # `end < start` / `step <= 0` raised TypeError on Python 3 when the
        # argument was None (Python 2 silently compared None as smallest).
        if end is None or end < start:
            print(u'参数起始点必须不大于终止点')
            return
        # Fix: error-message typo 布进 -> 步进 ("step").
        if step is None or step <= 0:
            print(u'参数步进必须大于0')
            return
        # Enumerate the inclusive range.
        l = []
        param = start
        while param <= end:
            l.append(param)
            param += step
        self.paramDict[name] = l

    def generateSetting(self):
        """Return the cartesian product of all parameters as a list of dicts."""
        nameList = self.paramDict.keys()
        paramList = self.paramDict.values()
        # Expand every combination of candidate values.
        productList = list(product(*paramList))
        # Zip each combination back to its parameter names.
        settingList = []
        for p in productList:
            d = dict(zip(nameList, p))
            settingList.append(d)
        return settingList

    def setOptimizeTarget(self, target):
        """Set the statistics key (e.g. 'sharpeRatio') used to rank runs."""
        self.optimizeTarget = target
########################################################################
class HistoryDataServer(RpcServer):
    """RPC server that serves historical bar data with an in-memory cache."""
    #----------------------------------------------------------------------
    def __init__(self, repAddress, pubAddress):
        """repAddress/pubAddress: ZeroMQ REP and PUB endpoints for RpcServer."""
        super(HistoryDataServer, self).__init__(repAddress, pubAddress)
        # MongoDB connection configured by the global settings dict.
        self.dbClient = pymongo.MongoClient(globalSetting['mongoHost'],
                                            globalSetting['mongoPort'])
        # Cache: (dbName, symbol, start, end) -> list of record dicts.
        self.historyDict = {}
        self.register(self.loadHistoryData)
    #----------------------------------------------------------------------
    def loadHistoryData(self, dbName, symbol, start, end):
        """Return history records for symbol in [start, end), cached per query.

        end may be falsy to mean "no upper bound".
        NOTE(review): `if history:` treats a cached empty result as a miss,
        so queries with no data hit the database every time — confirm whether
        that is intended.
        """
        # Serve from the in-memory cache when possible.
        history = self.historyDict.get((dbName, symbol, start, end), None)
        if history:
            print(u'找到内存缓存:%s %s %s %s' %(dbName, symbol, start, end))
            return history
        # Cache miss: load from MongoDB, sorted by timestamp.
        collection = self.dbClient[dbName][symbol]
        if end:
            flt = {'datetime':{'$gte':start, '$lt':end}}
        else:
            flt = {'datetime':{'$gte':start}}
        cx = collection.find(flt).sort('datetime')
        history = [d for d in cx]
        self.historyDict[(dbName, symbol, start, end)] = history
        print(u'从数据库加载:%s %s %s %s' %(dbName, symbol, start, end))
        return history
#----------------------------------------------------------------------
def runHistoryDataServer():
    """Start a standalone history-data cache server and run until a key press."""
    repAddress = 'tcp://*:5555'
    pubAddress = 'tcp://*:7777'
    hds = HistoryDataServer(repAddress, pubAddress)
    hds.start()
    print(u'按任意键退出')
    # Fix: wait for the user's key press BEFORE stopping the server.
    # Previously hds.stop() ran immediately after start(), so the server
    # was already stopped while raw_input() merely delayed process exit.
    raw_input()
    hds.stop()
#----------------------------------------------------------------------
def formatNumber(n):
    """Round *n* to two decimals and render it with thousands separators."""
    return format(round(n, 2), ',')
#----------------------------------------------------------------------
def optimize(strategyClass, setting, targetName,
             mode, startDate, initDays, endDate,
             slippage, rate, size, priceTick,
             dbName, symbol):
    """Run one complete backtest in a worker process for parallel optimization.

    Builds a fresh BacktestingEngine from the pickled scalar parameters,
    backtests strategyClass with *setting*, and returns
    (setting-repr, targetValue, statsDict) where targetValue falls back to 0
    when *targetName* is absent from the statistics.
    NOTE(review): returns str(setting) while runOptimization appends
    [str(setting)] — result tuples differ in shape between the two paths.
    """
    engine = BacktestingEngine()
    engine.setBacktestingMode(mode)
    engine.setStartDate(startDate, initDays)
    engine.setEndDate(endDate)
    engine.setSlippage(slippage)
    engine.setRate(rate)
    engine.setSize(size)
    engine.setPriceTick(priceTick)
    engine.setDatabase(dbName, symbol)
    engine.initStrategy(strategyClass, setting)
    engine.runBacktesting()
    df = engine.calculateDailyResult()
    df, d = engine.calculateDailyStatistics(df)
    # Missing target key scores the run as 0 rather than failing the pool task.
    try:
        targetValue = d[targetName]
    except KeyError:
        targetValue = 0
    return (str(setting), targetValue, d)
| mit |
solintegra/addons | account_payment/wizard/__init__.py | 436 | 1144 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_payment_order
import account_payment_populate_statement
import account_payment_pay
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Qalthos/ansible | test/units/modules/storage/netapp/test_na_ontap_export_policy_rule.py | 37 | 9740 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_export_policy_rule \
import NetAppontapExportRule as policy_rule # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
    """Stage *args* so AnsibleModule picks them up at construction time."""
    serialized = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(serialized)  # pylint: disable=protected-access
class AnsibleExitJson(Exception):
    """Raised by the patched module.exit_json and caught by the test case."""


class AnsibleFailJson(Exception):
    """Raised by the patched module.fail_json and caught by the test case."""


def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Patch over exit_json; package the return data into an exception."""
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)


def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Patch over fail_json; package the return data into an exception."""
    kwargs['failed'] = True
    raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
    """Mock server connection to an ONTAP host.

    kind selects the canned reply: 'rule' (one export rule), 'rules'
    (two records), 'policy' (one policy), or None (empty reply).
    """
    def __init__(self, kind=None, data=None):
        """kind: reply selector; data: rule attributes used to build replies."""
        self.kind = kind
        self.data = data
        self.xml_in = None   # last request, kept for inspection by tests
        self.xml_out = None  # last reply, kept for inspection by tests
    def invoke_successfully(self, xml, enable_tunneling):  # pylint: disable=unused-argument
        """Mock invoke_successfully returning canned XML data for self.kind."""
        self.xml_in = xml
        if self.kind == 'rule':
            xml = self.build_policy_rule(self.data)
        if self.kind == 'rules':
            xml = self.build_policy_rule(self.data, multiple=True)
        if self.kind == 'policy':
            xml = self.build_policy()
        self.xml_out = xml
        return xml
    @staticmethod
    def build_policy_rule(policy, multiple=False):
        """Build export-rule-get-iter reply XML from the *policy* dict.

        multiple=True reports num-records=2 to simulate an ambiguous match.
        """
        xml = netapp_utils.zapi.NaElement('xml')
        attributes = {'attributes-list': {
            'export-rule-info': {
                'policy-name': policy['name'],
                'client-match': policy['client_match'],
                'ro-rule': {
                    'security-flavor': 'any'
                },
                'rw-rule': {
                    'security-flavor': 'any'
                },
                'protocol': {
                    'access-protocol': policy['protocol']
                },
                'super-user-security': {
                    'security-flavor': 'any'
                },
                'is-allow-set-uid-enabled': 'false',
                'rule-index': policy['rule_index']
            }
        }, 'num-records': 2 if multiple is True else 1}
        xml.translate_struct(attributes)
        return xml
    @staticmethod
    def build_policy():
        """Build a minimal export-policy-get-iter reply (one record, no body)."""
        xml = netapp_utils.zapi.NaElement('xml')
        attributes = {
            'num-records': 1,
        }
        xml.translate_struct(attributes)
        return xml
class TestMyModule(unittest.TestCase):
    """Unit tests for the na_ontap_export_policy_rule module."""
    def setUp(self):
        # Replace exit_json/fail_json with exception-raising stand-ins so
        # module termination can be asserted via pytest.raises.
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)
        self.server = MockONTAPConnection()
        # Canonical rule attributes reused by mock_rule_args().
        self.mock_rule = {
            'name': 'test',
            'protocol': 'nfs',
            'client_match': '1.1.1.0',
            'rule_index': 10
        }
    def mock_rule_args(self):
        # Full module-argument dict built around self.mock_rule.
        return {
            'name': self.mock_rule['name'],
            'client_match': self.mock_rule['client_match'],
            'vserver': 'test',
            'protocol': self.mock_rule['protocol'],
            'rule_index': self.mock_rule['rule_index'],
            'ro_rule': 'any',
            'rw_rule': 'any',
            'hostname': 'test',
            'username': 'test_user',
            'password': 'test_pass!'
        }
    def get_mock_object(self, kind=None):
        """
        Helper method to return an na_ontap_export_policy_rule object
        :param kind: passes this param to MockONTAPConnection()
        :return: na_ontap_export_policy_rule object wired to a mock server
        """
        obj = policy_rule()
        obj.autosupport_log = Mock(return_value=None)
        if kind is None:
            obj.server = MockONTAPConnection()
        else:
            obj.server = MockONTAPConnection(kind=kind, data=self.mock_rule_args())
        return obj
    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            policy_rule()
        print('Info: %s' % exc.value.args[0]['msg'])
    def test_get_nonexistent_rule(self):
        ''' Test if get_export_policy_rule returns None for non-existent policy '''
        set_module_args(self.mock_rule_args())
        result = self.get_mock_object().get_export_policy_rule()
        assert result is None
    def test_get_nonexistent_policy(self):
        ''' Test if get_export_policy returns None for non-existent policy '''
        set_module_args(self.mock_rule_args())
        result = self.get_mock_object().get_export_policy()
        assert result is None
    def test_get_existing_rule(self):
        ''' Test if get_export_policy_rule returns rule details for existing policy '''
        data = self.mock_rule_args()
        set_module_args(data)
        result = self.get_mock_object('rule').get_export_policy_rule()
        assert result['name'] == data['name']
        assert result['client_match'] == data['client_match']
        assert result['ro_rule'] == ['any']  # from build_rule()
    def test_get_existing_policy(self):
        ''' Test if get_export_policy returns policy details for existing policy '''
        data = self.mock_rule_args()
        set_module_args(data)
        result = self.get_mock_object('policy').get_export_policy()
        assert result is not None
    def test_create_missing_param_error(self):
        ''' Test validation error from create '''
        data = self.mock_rule_args()
        del data['ro_rule']
        set_module_args(data)
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_mock_object().apply()
        msg = 'Error: Missing required param for creating export policy rule ro_rule'
        assert exc.value.args[0]['msg'] == msg
    def test_successful_create(self):
        ''' Test successful create '''
        set_module_args(self.mock_rule_args())
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_mock_object().apply()
        assert exc.value.args[0]['changed']
    def test_create_idempotency(self):
        ''' Test create idempotency '''
        set_module_args(self.mock_rule_args())
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_mock_object('rule').apply()
        assert not exc.value.args[0]['changed']
    def test_successful_delete_without_rule_index(self):
        ''' Test delete existing job '''
        data = self.mock_rule_args()
        data['state'] = 'absent'
        del data['rule_index']
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_mock_object('rule').apply()
        assert exc.value.args[0]['changed']
    def test_delete_idempotency(self):
        ''' Test delete idempotency '''
        data = self.mock_rule_args()
        data['state'] = 'absent'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_mock_object().apply()
        assert not exc.value.args[0]['changed']
    def test_successful_modify(self):
        ''' Test successful modify protocol '''
        data = self.mock_rule_args()
        data['protocol'] = ['cifs']
        data['allow_suid'] = 'true'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_mock_object('rule').apply()
        assert exc.value.args[0]['changed']
    def test_error_on_ambiguous_delete(self):
        ''' Test error if multiple entries match for a delete '''
        data = self.mock_rule_args()
        data['state'] = 'absent'
        set_module_args(data)
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_mock_object('rules').apply()
        msg = "Multiple export policy rules exist.Please specify a rule_index to delete"
        assert exc.value.args[0]['msg'] == msg
    def test_helper_query_parameters(self):
        ''' Test helper method set_query_parameters() '''
        data = self.mock_rule_args()
        set_module_args(data)
        result = self.get_mock_object('rule').set_query_parameters()
        print(str(result))
        assert 'query' in result
        assert 'export-rule-info' in result['query']
        assert result['query']['export-rule-info']['rule-index'] == data['rule_index']
| gpl-3.0 |
fernandezcuesta/ansible | lib/ansible/modules/cloud/ovirt/ovirt_nics_facts.py | 45 | 3943 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_nics_facts
short_description: Retrieve facts about one or more oVirt/RHV virtual machine network interfaces
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV virtual machine network interfaces."
notes:
- "This module creates a new top-level C(ovirt_nics) fact, which
contains a list of NICs."
options:
vm:
description:
- "Name of the VM where NIC is attached."
required: true
name:
description:
- "Name of the NIC, can be used as glob expression."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all NICs which names start with C(eth) for VM named C(centos7):
- ovirt_nics_facts:
vm: centos7
name: eth*
- debug:
var: ovirt_nics
'''
RETURN = '''
ovirt_nics:
description: "List of dictionaries describing the network interfaces. NIC attribues are mapped to dictionary keys,
all NICs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic."
returned: On success.
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
search_by_name,
)
def main():
    """Entry point: gather facts about a VM's NICs and exit via AnsibleModule."""
    argument_spec = ovirt_facts_full_argument_spec(
        vm=dict(required=True),
        name=dict(default=None),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)
    # Fix: pre-initialize so the finally block below cannot raise NameError
    # when create_connection() (or the auth pop) fails before assignment —
    # previously that NameError masked the real failure reported by fail_json.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        vms_service = connection.system_service().vms_service()
        vm_name = module.params['vm']
        vm = search_by_name(vms_service, vm_name)
        if vm is None:
            raise Exception("VM '%s' was not found." % vm_name)
        nics_service = vms_service.service(vm.id).nics_service()
        # Optional glob filter on NIC name.
        if module.params['name']:
            nics = [
                e for e in nics_service.list()
                if fnmatch.fnmatch(e.name, module.params['name'])
            ]
        else:
            nics = nics_service.list()
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_nics=[
                    get_dict_of_struct(
                        struct=c,
                        connection=connection,
                        fetch_nested=module.params.get('fetch_nested'),
                        attributes=module.params.get('nested_attributes'),
                    ) for c in nics
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only log out of sessions we actually opened with a password
        # (a caller-provided token must stay valid).
        if connection is not None:
            connection.close(logout=auth.get('token') is None)


if __name__ == '__main__':
    main()
| gpl-3.0 |
simpsonjulian/bofhtools | office365.py | 2 | 1252 | import webbrowser
from time import sleep
from os import environ
import requests
# App registration credentials come from the environment; all three must be
# set or the token request below fails.
application_id = environ.get('O365_APP_ID')
secret = environ.get('O365_APP_SECRET')
tenant_id = environ.get('O365_APP_TENANT_ID')
# NOTE(review): `headers` is never passed to the POST below — presumably
# leftover; confirm whether the token endpoint needs it (it does not: the
# form-encoded default is what v2.0/token expects).
headers = {'Content-type': 'application/json'}
payload = {'grant_type': 'client_credentials',
           'client_id': application_id,
           'client_secret': secret,
           # 'resource': 'https://graph.microsoft.com',
           'scope': 'https://graph.microsoft.com/.default'}
# 'scope': 'User.ReadBasic.All User.Read.All User.ReadWrite.All Directory.Read.All Directory.ReadWrite.All Directory.AccessAsUser.All'}
# OAuth2 client-credentials token endpoint for this tenant.
login_url = 'https://login.microsoftonline.com/{}/oauth2/v2.0/token'.format(tenant_id)
def register(tenant_id, application_id):
    # Open the admin-consent page in a browser (one-time app approval).
    # Relies on `import webbrowser` at the top of the file.
    webbrowser.open_new_tab("https://login.microsoftonline.com/{}/adminconsent?client_id={}&state=12345&redirect_uri=http://localhost:5000/auth".format(
        tenant_id, application_id))
# register(tenant_id, application_id)
# sleep(30)
# Request an app-only access token, then call Graph /me with it.
# NOTE(review): no error handling — a failed token request raises KeyError
# on 'access_token' below; also `print r.text` is Python 2 syntax.
r = requests.post(login_url, data=payload)
response = r.json()
# print response
token = response['access_token']
r = requests.get('https://graph.microsoft.com/v1.0/me', headers={'Authorization': 'Bearer {}'.format(token)})
print r.text
apophys/ipaqe-provision-hosts | ipaqe_provision_hosts/backend/base.py | 1 | 1212 | # author: Milan Kubik
""" Backend base class
The class provides the contract for backend modules
implementing the provisioning of dynamic resources.
"""
from ipaqe_provision_hosts.errors import IPAQEProvisionerError
# Shared message for the abstract methods on IDMBackendBase below.
NOT_IMPLEMENTED_MSG = "You need to override this method in a subclass"


class VMsNotCreatedError(IPAQEProvisionerError):
    """Raise in case VMs could not be provisioned"""
class IDMBackendBase(object):
    """IDMBackendBase class

    Contract between the idm-prepare-hosts utility and a concrete
    backend implementation that provisions dynamic resources.
    """

    def __init__(self, config=None):
        self._config = config or {}  # backend-specific configuration
        self._vms = []               # host entries, filled by provisioning

    @property
    def vms(self):
        """List of provisioned host entries; raises if none exist yet."""
        if self._vms:
            return self._vms
        raise VMsNotCreatedError("No VMs were provisioned yet")

    def provision_resources(self, vm_count):
        """Provision the hosts in a backend"""
        raise NotImplementedError(NOT_IMPLEMENTED_MSG)

    def delete_resources(self):
        """Delete the resources provisioned by the backend"""
        raise NotImplementedError(NOT_IMPLEMENTED_MSG)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.