repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
cm-a7lte/device_samsung_a7lte | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
# Module-level tally of events with no dedicated handler: event name -> count.
# autodict comes from perf's Core module (imported above via 'from Core import *').
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
    # Called by perf once after the last event: report anything unhandled.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
                       common_secs, common_nsecs, common_pid, common_comm,
                       vec):
    # Handler for the irq:softirq_entry tracepoint: print the common
    # event fields, the uncommon fields, then the softirq vector.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)
    print_uncommon(context)
    # symbol_str() (from Core) renders the numeric vec symbolically; the
    # trailing comma suppresses print's extra newline (Python 2 idiom).
    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
                  common_secs, common_nsecs, common_pid, common_comm,
                  call_site, ptr, bytes_req, bytes_alloc,
                  gfp_flags):
    # Handler for the kmem:kmalloc tracepoint: print the common event
    # fields, the uncommon fields, then the allocation details.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)
    print_uncommon(context)
    # flag_str() (from Core) renders gfp_flags as symbolic flag names; the
    # trailing comma suppresses print's extra newline (Python 2 idiom).
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    """Count an event for which no specific handler is defined."""
    # First occurrence initializes the counter; later ones increment it.
    if event_name in unhandled:
        unhandled[event_name] += 1
    else:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # One-line event prefix: name, cpu, timestamp, pid, comm.  The trailing
    # comma keeps the cursor on the same line for the handler's own output.
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # common_pc()/common_flags()/common_lock_depth() call back into perf
    # (perf_trace_context) to fetch per-event fields that are not passed
    # as explicit handler arguments.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))
def print_unhandled():
    # Print a two-column (event, count) table of unhandled events, or
    # nothing at all if every event had a handler.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),

    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
FlashGordon95/Financial-Portfolio-Flask | venv/lib/python2.7/site-packages/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py | 2360 | 3778 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
# Version of the Python 3 stdlib implementation this module was ported from.
__version__ = '3.4.0.2'
class CertificateError(ValueError):
    """Raised when a certificate does not match the expected hostname."""
    pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate")
    # Collect every name we tried (for the error message) while scanning
    # subjectAltName dNSName entries first.
    dnsnames = []
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)
    # Nothing matched: raise with as much context as we gathered.
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
            "doesn't match either of %s"
            % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
            "doesn't match %r"
            % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
            "subjectAltName fields were found")
| mit |
MrFastDie/shoutcast-api | bs4/builder/_htmlparser.py | 71 | 9102 | """Use the HTMLParser library to parse HTML files that aren't too bad."""
# The tree builder class is the only public name exported by this module.
__all__ = [
    'HTMLParserTreeBuilder',
    ]
from HTMLParser import HTMLParser
try:
    from HTMLParser import HTMLParseError
except ImportError, e:
    # HTMLParseError is removed in Python 3.5. Since it can never be
    # thrown in 3.5, we can just define our own class as a placeholder.
    class HTMLParseError(Exception):
        pass
import sys
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
# Capability flags derived from the running interpreter version; they
# drive the constructor arguments used in HTMLParserTreeBuilder.__init__.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3
CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3
CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4
from bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from bs4.dammit import EntitySubstitution, UnicodeDammit
from bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
# Feature name under which this tree builder is registered.
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def handle_starttag(self, name, attrs):
# XXX namespace
attr_dict = {}
for key, value in attrs:
# Change None attribute values to the empty string
# for consistency with the other tree builders.
if value is None:
value = ''
attr_dict[key] = value
attrvalue = '""'
self.soup.handle_starttag(name, None, None, attr_dict)
def handle_endtag(self, name):
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed in all supported versions.
# http://bugs.python.org/issue13633
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
elif name.startswith('X'):
real_name = int(name.lstrip('X'), 16)
else:
real_name = int(name)
try:
data = unichr(real_name)
except (ValueError, OverflowError), e:
data = u"\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
def handle_entityref(self, name):
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
elif data == 'DOCTYPE':
# i.e. "<!DOCTYPE>"
data = ''
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
    # Tree builder backed by the standard library HTMLParser module.
    is_xml = False
    picklable = True
    NAME = HTMLPARSER
    features = [NAME, HTML, STRICT]

    def __init__(self, *args, **kwargs):
        # See the module-level capability flags: 'strict' only exists (and
        # is only worth disabling) on certain 3.2/3.3 releases, and
        # 'convert_charrefs' appeared in 3.4.  The args are stored and the
        # actual parser is constructed lazily in feed().
        if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED:
            kwargs['strict'] = False
        if CONSTRUCTOR_TAKES_CONVERT_CHARREFS:
            kwargs['convert_charrefs'] = False
        self.parser_args = (args, kwargs)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None, exclude_encodings=None):
        """
        :return: A 4-tuple (markup, original encoding, encoding
        declared within markup, whether any characters had to be
        replaced with REPLACEMENT CHARACTER).
        """
        if isinstance(markup, unicode):
            # Already decoded text: nothing to detect.
            yield (markup, None, None, False)
            return

        # Let UnicodeDammit guess the encoding, preferring the caller's
        # hints over sniffing.
        try_encodings = [user_specified_encoding, document_declared_encoding]
        dammit = UnicodeDammit(markup, try_encodings, is_html=True,
                               exclude_encodings=exclude_encodings)
        yield (dammit.markup, dammit.original_encoding,
               dammit.declared_html_encoding,
               dammit.contains_replacement_characters)

    def feed(self, markup):
        # Build a fresh parser per document and point it at our soup.
        args, kwargs = self.parser_args
        parser = BeautifulSoupHTMLParser(*args, **kwargs)
        parser.soup = self.soup
        try:
            parser.feed(markup)
        except HTMLParseError, e:
            warnings.warn(RuntimeWarning(
                "Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
            raise e
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
    import re
    # Backported tolerant attribute-matching regex from 3.2.3.
    attrfind_tolerant = re.compile(
        r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
        r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
    HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant

    locatestarttagend = re.compile(r"""
  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
  (?:\s+                             # whitespace before attribute name
    (?:[a-zA-Z_][-.:a-zA-Z0-9_]*     # attribute name
      (?:\s*=\s*                     # value indicator
        (?:'[^']*'                   # LITA-enclosed value
          |\"[^\"]*\"                # LIT-enclosed value
          |[^'\">\s]+                # bare value
         )
       )?
     )
   )*
  \s*                                # trailing whitespace
""", re.VERBOSE)
    BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend

    from html.parser import tagfind, attrfind

    def parse_starttag(self, i):
        # Backported 3.2.3 version of HTMLParser.parse_starttag; `i` is the
        # index of '<' in self.rawdata.  Returns the end position of the tag.
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]

        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = rawdata[i+1:k].lower()
        while k < endpos:
            if self.strict:
                m = attrfind.match(rawdata, k)
            else:
                m = attrfind_tolerant.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                # Strip matching quotes around the attribute value.
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()

        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            # Malformed tag end: report position (strict) or emit as data.
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            if self.strict:
                self.error("junk characters in start tag: %r"
                           % (rawdata[k:endpos][:20],))
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos

    def set_cdata_mode(self, elem):
        # Treat everything up to the matching close tag as raw character data.
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)

    BeautifulSoupHTMLParser.parse_starttag = parse_starttag
    BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode

    CONSTRUCTOR_TAKES_STRICT = True
| mit |
Matt-Deacalion/django | tests/auth_tests/test_views.py | 16 | 45028 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import itertools
import os
import re
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django.contrib.auth import REDIRECT_FIELD_NAME, SESSION_KEY
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, SetPasswordForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.auth.views import login as login_view, redirect_to_login
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sites.requests import RequestSite
from django.core import mail
from django.core.urlresolvers import NoReverseMatch, reverse, reverse_lazy
from django.db import connection
from django.http import HttpRequest, QueryDict
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.test import (
TestCase, ignore_warnings, modify_settings, override_settings,
)
from django.test.utils import patch_logger
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils.six.moves.urllib.parse import ParseResult, urlparse
from django.utils.translation import LANGUAGE_SESSION_KEY
from .models import UUIDUser
from .settings import AUTH_TEMPLATES
@override_settings(
    LANGUAGES=[
        ('en', 'English'),
    ],
    LANGUAGE_CODE='en',
    TEMPLATES=AUTH_TEMPLATES,
    USE_TZ=False,
    PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
    ROOT_URLCONF='auth_tests.urls',
)
class AuthViewsTestCase(TestCase):
    """
    Helper base class for all the follow test cases.
    """

    @classmethod
    def setUpTestData(cls):
        # Fixture users covering the interesting credential states:
        # u1 active, u2 inactive, u3 staff, u4 empty password,
        # u5 '$' (unmanageable) hash, u6 unknown hash format.
        cls.u1 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
            first_name='Test', last_name='Client', email='testclient@example.com', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u2 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='inactive',
            first_name='Inactive', last_name='User', email='testclient2@example.com', is_staff=False, is_active=False,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u3 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
            first_name='Staff', last_name='Member', email='staffmember@example.com', is_staff=True, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u4 = User.objects.create(
            password='', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
            username='empty_password', first_name='Empty', last_name='Password', email='empty_password@example.com',
            is_staff=False, is_active=True, date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u5 = User.objects.create(
            password='$', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
            username='unmanageable_password', first_name='Unmanageable', last_name='Password',
            email='unmanageable_password@example.com', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u6 = User.objects.create(
            password='foo$bar', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
            username='unknown_password', first_name='Unknown', last_name='Password',
            email='unknown_password@example.com', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )

    def login(self, username='testclient', password='password'):
        # Log in through the real login view and verify a session was created.
        response = self.client.post('/login/', {
            'username': username,
            'password': password,
        })
        self.assertIn(SESSION_KEY, self.client.session)
        return response

    def logout(self):
        # Log out via the admin logout view and verify the session is gone.
        response = self.client.get('/admin/logout/')
        self.assertEqual(response.status_code, 200)
        self.assertNotIn(SESSION_KEY, self.client.session)

    def assertFormError(self, response, error):
        """Assert that error is found in response.context['form'] errors"""
        form_errors = list(itertools.chain(*response.context['form'].errors.values()))
        self.assertIn(force_text(error), form_errors)

    def assertURLEqual(self, url, expected, parse_qs=False):
        """
        Given two URLs, make sure all their components (the ones given by
        urlparse) are equal, only comparing components that are present in both
        URLs.
        If `parse_qs` is True, then the querystrings are parsed with QueryDict.
        This is useful if you don't want the order of parameters to matter.
        Otherwise, the query strings are compared as-is.
        """
        fields = ParseResult._fields
        for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
            if parse_qs and attr == 'query':
                x, y = QueryDict(x), QueryDict(y)
            if x and y and x != y:
                self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
@override_settings(ROOT_URLCONF='django.contrib.auth.urls')
class AuthViewNamedURLTests(AuthViewsTestCase):

    def test_named_urls(self):
        "Named URLs should be reversible"
        # (url name, positional args, keyword args) for every view that
        # django.contrib.auth.urls is expected to expose.
        cases = [
            ('login', [], {}),
            ('logout', [], {}),
            ('password_change', [], {}),
            ('password_change_done', [], {}),
            ('password_reset', [], {}),
            ('password_reset_done', [], {}),
            ('password_reset_confirm', [], {
                'uidb64': 'aaaaaaa',
                'token': '1111-aaaaa',
            }),
            ('password_reset_complete', [], {}),
        ]
        for url_name, url_args, url_kwargs in cases:
            try:
                reverse(url_name, args=url_args, kwargs=url_kwargs)
            except NoReverseMatch:
                self.fail("Reversal of url named '%s' failed with NoReverseMatch" % url_name)
class PasswordResetTest(AuthViewsTestCase):
    # End-to-end tests of the password reset flow: request form, reset
    # email, confirmation link, and the final set-password form.

    def test_email_not_found(self):
        """If the provided email is not registered, don't raise any error but
        also don't send any email."""
        response = self.client.get('/password_reset/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 0)

    def test_email_found(self):
        "Email is sent if a valid email address is provided for password reset"
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn("http://", mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
        # optional multipart text/html email has been added. Make sure original,
        # default functionality is 100% the same
        self.assertFalse(mail.outbox[0].message().is_multipart())

    def test_extra_email_context(self):
        """
        extra_email_context should be available in the email template context.
        """
        response = self.client.post(
            '/password_reset_extra_email_context/',
            {'email': 'staffmember@example.com'},
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn('Email email context: "Hello!"', mail.outbox[0].body)

    def test_html_mail_template(self):
        """
        A multipart email with text/plain and text/html is sent
        if the html_email_template parameter is passed to the view
        """
        response = self.client.post('/password_reset/html_email_template/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0].message()
        self.assertEqual(len(message.get_payload()), 2)
        self.assertTrue(message.is_multipart())
        self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
        self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
        self.assertNotIn('<html>', message.get_payload(0).get_payload())
        self.assertIn('<html>', message.get_payload(1).get_payload())

    def test_email_found_custom_from(self):
        "Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
        response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)

    @ignore_warnings(category=RemovedInDjango110Warning)
    @override_settings(ALLOWED_HOSTS=['adminsite.com'])
    def test_admin_reset(self):
        "If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
        response = self.client.post('/admin_password_reset/',
            {'email': 'staffmember@example.com'},
            HTTP_HOST='adminsite.com'
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn("http://adminsite.com", mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)

    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails"
        # This attack is based on the way browsers handle URLs. The colon
        # should be used to separate the port, but if the URL contains an @,
        # the colon is interpreted as part of a username for login purposes,
        # making 'evil.com' the request domain. Since HTTP_HOST is used to
        # produce a meaningful reset URL, we need to be certain that the
        # HTTP_HOST header isn't poisoned. This is done as a check when get_host()
        # is invoked, but we check here as a practical consequence.
        with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
            response = self.client.post(
                '/password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(mail.outbox), 0)
            self.assertEqual(len(logger_calls), 1)

    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host_admin_site(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
        with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
            response = self.client.post(
                '/admin_password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(mail.outbox), 0)
            self.assertEqual(len(logger_calls), 1)

    def _test_confirm_start(self):
        # Start by creating the email
        self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        # Extract the confirmation URL (full URL and path-only) from the body.
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertIsNotNone(urlmatch, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]

    def test_confirm_valid(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")

    def test_confirm_invalid(self):
        url, path = self._test_confirm_start()
        # Let's munge the token in the path, but keep the same length,
        # in case the URLconf will reject a different length.
        path = path[:-5] + ("0" * 4) + path[-1]

        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_user(self):
        # Ensure that we get a 200 response for a non-existent user, not a 404
        response = self.client.get('/reset/123456/1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_overflow_user(self):
        # Ensure that we get a 200 response for a base36 user id that overflows int
        response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_post(self):
        # Same as test_confirm_invalid, but trying
        # to do a POST instead.
        url, path = self._test_confirm_start()
        path = path[:-5] + ("0" * 4) + path[-1]

        self.client.post(path, {
            'new_password1': 'anewpassword',
            'new_password2': ' anewpassword',
        })
        # Check the password has not been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(not u.check_password("anewpassword"))

    def test_confirm_complete(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        # Check the password has been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(u.check_password("anewpassword"))

        # Check we can't use the link again
        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_different_passwords(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'x'})
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_reset_redirect_default(self):
        response = self.client.post('/password_reset/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/done/')

    def test_reset_custom_redirect(self):
        response = self.client.post('/password_reset/custom_redirect/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_reset_custom_redirect_named(self):
        response = self.client.post('/password_reset/custom_redirect/named/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')

    def test_confirm_redirect_default(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/reset/done/')

    def test_confirm_redirect_custom(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/')
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_confirm_redirect_custom_named(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/named/')
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')

    def test_confirm_display_user_from_form(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)

        # #16919 -- The ``password_reset_confirm`` view should pass the user
        # object to the ``SetPasswordForm``, even on GET requests.
        # For this test, we render ``{{ form.user }}`` in the template
        # ``registration/password_reset_confirm.html`` so that we can test this.
        username = User.objects.get(email='staffmember@example.com').username
        self.assertContains(response, "Hello, %s." % username)

        # However, the view should NOT pass any user object on a form if the
        # password reset link was invalid.
        response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
        self.assertContains(response, "Hello, .")
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
    # Same reset flow, but exercised against a custom user model.
    user_email = 'staffmember@example.com'

    @classmethod
    def setUpTestData(cls):
        cls.u1 = CustomUser.custom_objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), email='staffmember@example.com', is_active=True,
            is_admin=False, date_of_birth=datetime.date(1976, 11, 8)
        )

    def _test_confirm_start(self):
        # Start by creating the email
        response = self.client.post('/password_reset/', {'email': self.user_email})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        # Extract the confirmation URL (full URL and path-only) from the body.
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertIsNotNone(urlmatch, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]

    def test_confirm_valid_custom_user(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")
        # then submit a new password
        response = self.client.post(path, {
            'new_password1': 'anewpassword',
            'new_password2': 'anewpassword',
        })
        self.assertRedirects(response, '/reset/done/')
@override_settings(AUTH_USER_MODEL='auth.UUIDUser')
class UUIDUserPasswordResetTest(CustomUserPasswordResetTest):
    # Reruns the custom-user reset flow with a UUID-keyed user model.

    def _test_confirm_start(self):
        # Create the user on the fly instead of relying on fixture data.
        UUIDUser.objects.create_user(
            username='foo',
            password='foo',
            email=self.user_email,
        )
        parent = super(UUIDUserPasswordResetTest, self)
        return parent._test_confirm_start()
class ChangePasswordTest(AuthViewsTestCase):
    # Tests of the logged-in password_change view and its redirects.

    def fail_login(self, password='password'):
        # Attempt a login that must fail and assert the form error shown.
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
            'username': User._meta.get_field('username').verbose_name
        })

    def logout(self):
        self.client.get('/logout/')

    def test_password_change_fails_with_invalid_old_password(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'donuts',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])

    def test_password_change_fails_with_mismatched_passwords(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'donuts',
        })
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_password_change_succeeds(self):
        self.login()
        self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        # Old credentials must now fail; new ones must work.
        self.fail_login()
        self.login(password='password1')

    def test_password_change_done_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_change/done/')

    @override_settings(LOGIN_URL='/login/')
    def test_password_change_done_fails(self):
        # Anonymous access to the done page redirects to login.
        response = self.client.get('/password_change/done/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/?next=/password_change/done/')

    def test_password_change_redirect_default(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_change/done/')

    def test_password_change_redirect_custom(self):
        self.login()
        response = self.client.post('/password_change/custom/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_password_change_redirect_custom_named(self):
        self.login()
        response = self.client.post('/password_change/custom/named/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
@modify_settings(MIDDLEWARE_CLASSES={
    'append': 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
})
class SessionAuthenticationTests(AuthViewsTestCase):
    def test_user_password_change_updates_session(self):
        """
        #21649 - Ensure contrib.auth.views.password_change updates the user's
        session auth hash after a password change so the session isn't logged out.
        """
        self.login()
        resp = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        # Following the redirect only succeeds if the session auth hash
        # was refreshed along with the password.
        self.assertRedirects(resp, '/password_change/done/')
class LoginTest(AuthViewsTestCase):
    """Tests for the login view: template context, open-redirect protection,
    CSRF token rotation, and session-key hygiene on login."""
    def test_current_site_in_context_after_login(self):
        response = self.client.get(reverse('login'))
        self.assertEqual(response.status_code, 200)
        if apps.is_installed('django.contrib.sites'):
            Site = apps.get_model('sites.Site')
            site = Site.objects.get_current()
            self.assertEqual(response.context['site'], site)
            self.assertEqual(response.context['site_name'], site.name)
        else:
            # Without the sites framework a RequestSite stand-in is used.
            self.assertIsInstance(response.context['site'], RequestSite)
        self.assertIsInstance(response.context['form'], AuthenticationForm)
    def test_security_check(self, password='password'):
        """The ?next= parameter must not allow redirects to other hosts or
        to non-HTTP(S) schemes (open-redirect/XSS protection)."""
        login_url = reverse('login')
        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'http:///example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '///example.com',
                        '//example.com',
                        'javascript:alert("XSS")'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            response = self.client.post(nasty_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertNotIn(bad_url, response.url,
                             "%s should be blocked" % bad_url)
        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https://testserver/',
                         'HTTPS://testserver/',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            response = self.client.post(safe_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertIn(good_url, response.url, "%s should be allowed" % good_url)
    def test_login_form_contains_request(self):
        # 15198
        self.client.post('/custom_requestauth_login/', {
            'username': 'testclient',
            'password': 'password',
        }, follow=True)
        # the custom authentication form used by this login asserts
        # that a request is passed to the form successfully.
    def test_login_csrf_rotate(self, password='password'):
        """
        Makes sure that a login rotates the currently-used CSRF token.
        """
        # Do a GET to establish a CSRF token
        # TestClient isn't used here as we're testing middleware, essentially.
        req = HttpRequest()
        CsrfViewMiddleware().process_view(req, login_view, (), {})
        # get_token() triggers CSRF token inclusion in the response
        get_token(req)
        resp = login_view(req)
        resp2 = CsrfViewMiddleware().process_response(req, resp)
        # The pre-login token is read back from the CSRF cookie.
        csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token1 = csrf_cookie.coded_value
        # Prepare the POST request
        req = HttpRequest()
        req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
        req.method = "POST"
        req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1}
        # Use POST request to log in
        SessionMiddleware().process_request(req)
        CsrfViewMiddleware().process_view(req, login_view, (), {})
        req.META["SERVER_NAME"] = "testserver"  # Required to have redirect work in login view
        req.META["SERVER_PORT"] = 80
        resp = login_view(req)
        resp2 = CsrfViewMiddleware().process_response(req, resp)
        csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token2 = csrf_cookie.coded_value
        # Check the CSRF token switched
        self.assertNotEqual(token1, token2)
    def test_session_key_flushed_on_login(self):
        """
        To avoid reusing another user's session, ensure a new, empty session is
        created if the existing session corresponds to a different authenticated
        user.
        """
        self.login()
        original_session_key = self.client.session.session_key
        self.login(username='staff')
        self.assertNotEqual(original_session_key, self.client.session.session_key)
    def test_session_key_flushed_on_login_after_password_change(self):
        """
        As above, but same user logging in after a password change.
        """
        self.login()
        original_session_key = self.client.session.session_key
        # If no password change, session key should not be flushed.
        self.login()
        self.assertEqual(original_session_key, self.client.session.session_key)
        user = User.objects.get(username='testclient')
        user.set_password('foobar')
        user.save()
        self.login(password='foobar')
        # After a password change the old session key must be discarded.
        self.assertNotEqual(original_session_key, self.client.session.session_key)
    def test_login_session_without_hash_session_key(self):
        """
        Session without django.contrib.auth.HASH_SESSION_KEY should login
        without an exception.
        """
        user = User.objects.get(username='testclient')
        engine = import_module(settings.SESSION_ENGINE)
        session = engine.SessionStore()
        session[SESSION_KEY] = user.id
        session.save()
        original_session_key = session.session_key
        self.client.cookies[settings.SESSION_COOKIE_NAME] = original_session_key
        self.login()
        # Logging in replaces the handcrafted session with a fresh one.
        self.assertNotEqual(original_session_key, self.client.session.session_key)
class LoginURLSettings(AuthViewsTestCase):
    """Tests for settings.LOGIN_URL."""

    def assertLoginURLEquals(self, url, parse_qs=False):
        # Hitting a login_required view anonymously must redirect to ``url``.
        resp = self.client.get('/login_required/')
        self.assertEqual(resp.status_code, 302)
        self.assertURLEqual(resp.url, url, parse_qs=parse_qs)

    @override_settings(LOGIN_URL='/login/')
    def test_standard_login_url(self):
        self.assertLoginURLEquals('/login/?next=/login_required/')

    @override_settings(LOGIN_URL='login')
    def test_named_login_url(self):
        # URL names are resolved before redirecting.
        self.assertLoginURLEquals('/login/?next=/login_required/')

    @override_settings(LOGIN_URL='http://remote.example.com/login')
    def test_remote_login_url(self):
        # Off-site login URLs get an absolute, quoted ``next`` parameter.
        next_param = urlquote('http://testserver/login_required/')
        self.assertLoginURLEquals('http://remote.example.com/login?next=%s' % next_param)

    @override_settings(LOGIN_URL='https:///login/')
    def test_https_login_url(self):
        next_param = urlquote('http://testserver/login_required/')
        self.assertLoginURLEquals('https:///login/?next=%s' % next_param)

    @override_settings(LOGIN_URL='/login/?pretty=1')
    def test_login_url_with_querystring(self):
        self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/', parse_qs=True)

    @override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
    def test_remote_login_url_with_next_querystring(self):
        # An existing ``next`` in LOGIN_URL is replaced, not appended to.
        next_param = urlquote('http://testserver/login_required/')
        self.assertLoginURLEquals('http://remote.example.com/login/?next=%s' % next_param)

    @override_settings(LOGIN_URL=reverse_lazy('login'))
    def test_lazy_login_url(self):
        self.assertLoginURLEquals('/login/?next=/login_required/')
class LoginRedirectUrlTest(AuthViewsTestCase):
    """Tests for settings.LOGIN_REDIRECT_URL."""

    def assertLoginRedirectURLEqual(self, url):
        # A successful login must 302 to the configured URL.
        resp = self.login()
        self.assertEqual(resp.status_code, 302)
        self.assertURLEqual(resp.url, url)

    def test_default(self):
        self.assertLoginRedirectURLEqual('/accounts/profile/')

    @override_settings(LOGIN_REDIRECT_URL='/custom/')
    def test_custom(self):
        self.assertLoginRedirectURLEqual('/custom/')

    @override_settings(LOGIN_REDIRECT_URL='password_reset')
    def test_named(self):
        # URL names are resolved before redirecting.
        self.assertLoginRedirectURLEqual('/password_reset/')

    @override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
    def test_remote(self):
        self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')
class RedirectToLoginTests(AuthViewsTestCase):
    """Tests for the redirect_to_login view"""

    @override_settings(LOGIN_URL=reverse_lazy('login'))
    def test_redirect_to_login_with_lazy(self):
        # A lazily-reversed LOGIN_URL must be resolved when the redirect is built.
        resp = redirect_to_login(next='/else/where/')
        self.assertEqual('/login/?next=/else/where/', resp.url)

    @override_settings(LOGIN_URL=reverse_lazy('login'))
    def test_redirect_to_login_with_lazy_and_unicode(self):
        # Non-ASCII characters in ``next`` are percent-encoded.
        resp = redirect_to_login(next='/else/where/झ/')
        self.assertEqual('/login/?next=/else/where/%E0%A4%9D/', resp.url)
class LogoutTest(AuthViewsTestCase):
    """Tests for the logout view: templates, redirects, and next= safety."""
    def confirm_logged_out(self):
        # Once logged out, the auth session key is gone from the session.
        self.assertNotIn(SESSION_KEY, self.client.session)
    def test_logout_default(self):
        "Logout without next_page option renders the default template"
        self.login()
        response = self.client.get('/logout/')
        self.assertContains(response, 'Logged out')
        self.confirm_logged_out()
    def test_14377(self):
        # Bug 14377
        self.login()
        response = self.client.get('/logout/')
        self.assertIn('site', response.context)
    def test_logout_with_overridden_redirect_url(self):
        # Bug 11223
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        # A ?next= querystring takes precedence over the view's next_page.
        response = self.client.get('/logout/next_page/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/')
        self.confirm_logged_out()
    def test_logout_with_next_page_specified(self):
        "Logout with next_page option given redirects to specified resource"
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        self.confirm_logged_out()
    def test_logout_with_redirect_argument(self):
        "Logout with query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/')
        self.confirm_logged_out()
    def test_logout_with_custom_redirect_argument(self):
        "Logout with custom query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/custom_query/?follow=/somewhere/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        self.confirm_logged_out()
    def test_logout_with_named_redirect(self):
        "Logout resolves names or URLs passed as next_page."
        self.login()
        response = self.client.get('/logout/next_page/named/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
        self.confirm_logged_out()
    def test_security_check(self, password='password'):
        """The logout ?next= parameter must reject off-host and non-HTTP(S)
        URLs (open-redirect protection), mirroring LoginTest.test_security_check."""
        logout_url = reverse('logout')
        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'http:///example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '///example.com',
                        '//example.com',
                        'javascript:alert("XSS")'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            # Re-login before each attempt: each iteration logs the user out.
            self.login()
            response = self.client.get(nasty_url)
            self.assertEqual(response.status_code, 302)
            self.assertNotIn(bad_url, response.url,
                             "%s should be blocked" % bad_url)
            self.confirm_logged_out()
        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https://testserver/',
                         'HTTPS://testserver/',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            self.login()
            response = self.client.get(safe_url)
            self.assertEqual(response.status_code, 302)
            self.assertIn(good_url, response.url, "%s should be allowed" % good_url)
            self.confirm_logged_out()
    def test_logout_preserve_language(self):
        """Check that language stored in session is preserved after logout"""
        # Create a new session with language
        engine = import_module(settings.SESSION_ENGINE)
        session = engine.SessionStore()
        session[LANGUAGE_SESSION_KEY] = 'pl'
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
        self.client.get('/logout/')
        # The session is flushed on logout, but the language key survives.
        self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], 'pl')
# Redirect in test_user_change_password will fail if session auth hash
# isn't updated after password change (#21649)
@modify_settings(MIDDLEWARE_CLASSES={
    'append': 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
})
@override_settings(
    PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
    ROOT_URLCONF='auth_tests.urls_admin',
)
class ChangelistTests(AuthViewsTestCase):
    """Tests for the user admin changelist/change views and their log entries."""
    def setUp(self):
        # Make me a superuser before logging in.
        User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
        self.login()
        # self.u1 is provided by AuthViewsTestCase's fixture data.
        self.admin = User.objects.get(pk=self.u1.pk)
    def get_user_data(self, user):
        # Build the full POST payload the admin change form expects, including
        # the split date/time widget fields and their "initial-" counterparts
        # (needed so the form can detect which fields actually changed).
        return {
            'username': user.username,
            'password': user.password,
            'email': user.email,
            'is_active': user.is_active,
            'is_staff': user.is_staff,
            'is_superuser': user.is_superuser,
            'last_login_0': user.last_login.strftime('%Y-%m-%d'),
            'last_login_1': user.last_login.strftime('%H:%M:%S'),
            'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
            'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
            'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
            'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
            'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
            'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
            'first_name': user.first_name,
            'last_name': user.last_name,
        }
    # #20078 - users shouldn't be allowed to guess password hashes via
    # repeated password__startswith queries.
    def test_changelist_disallows_password_lookups(self):
        # A lookup that tries to filter on password isn't OK
        with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as logger_calls:
            response = self.client.get(reverse('auth_test_admin:auth_user_changelist') + '?password__startswith=sha1$')
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(logger_calls), 1)
    def test_user_change_email(self):
        data = self.get_user_data(self.admin)
        data['email'] = 'new_' + data['email']
        response = self.client.post(
            reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)),
            data
        )
        self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
        # The admin LogEntry should record only the changed field.
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.change_message, 'Changed email.')
    def test_user_not_change(self):
        # Re-posting the unchanged form data logs "No fields changed."
        response = self.client.post(
            reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)),
            self.get_user_data(self.admin)
        )
        self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.change_message, 'No fields changed.')
    def test_user_change_password(self):
        user_change_url = reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,))
        password_change_url = reverse('auth_test_admin:auth_user_password_change', args=(self.admin.pk,))
        response = self.client.get(user_change_url)
        # Test the link inside password field help_text.
        rel_link = re.search(
            r'you can change the password using <a href="([^"]*)">this form</a>',
            force_text(response.content)
        ).groups()[0]
        # The (possibly relative) link must resolve to the password-change view.
        self.assertEqual(
            os.path.normpath(user_change_url + rel_link),
            os.path.normpath(password_change_url)
        )
        response = self.client.post(
            password_change_url,
            {
                'password1': 'password1',
                'password2': 'password1',
            }
        )
        self.assertRedirects(response, user_change_url)
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.change_message, 'Changed password.')
        # The new password must actually take effect for login.
        self.logout()
        self.login(password='password1')
    def test_user_change_different_user_password(self):
        # Changing another user's password is attributed to the acting admin.
        u = User.objects.get(email='staffmember@example.com')
        response = self.client.post(
            reverse('auth_test_admin:auth_user_password_change', args=(u.pk,)),
            {
                'password1': 'password1',
                'password2': 'password1',
            }
        )
        self.assertRedirects(response, reverse('auth_test_admin:auth_user_change', args=(u.pk,)))
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.user_id, self.admin.pk)
        self.assertEqual(row.object_id, str(u.pk))
        self.assertEqual(row.change_message, 'Changed password.')
    def test_password_change_bad_url(self):
        # A non-integer pk in the URL yields a 404, not a server error.
        response = self.client.get(reverse('auth_test_admin:auth_user_password_change', args=('foobar',)))
        self.assertEqual(response.status_code, 404)
@override_settings(
    AUTH_USER_MODEL='auth.UUIDUser',
    ROOT_URLCONF='auth_tests.urls_custom_user_admin',
)
class UUIDUserTests(TestCase):
    """Admin password-change flow for a custom user model with a UUID pk."""
    def test_admin_password_change(self):
        u = UUIDUser.objects.create_superuser(username='uuid', email='foo@bar.com', password='test')
        self.assertTrue(self.client.login(username='uuid', password='test'))
        user_change_url = reverse('custom_user_admin:auth_uuiduser_change', args=(u.pk,))
        response = self.client.get(user_change_url)
        self.assertEqual(response.status_code, 200)
        password_change_url = reverse('custom_user_admin:auth_user_password_change', args=(u.pk,))
        response = self.client.get(password_change_url)
        self.assertEqual(response.status_code, 200)
        # A LogEntry is created with pk=1 which breaks a FK constraint on MySQL
        with connection.constraint_checks_disabled():
            response = self.client.post(password_change_url, {
                'password1': 'password1',
                'password2': 'password1',
            })
            self.assertRedirects(response, user_change_url)
            row = LogEntry.objects.latest('id')
            self.assertEqual(row.user_id, 1)  # hardcoded in CustomUserAdmin.log_change()
            self.assertEqual(row.object_id, str(u.pk))
            self.assertEqual(row.change_message, 'Changed password.')
| bsd-3-clause |
alexschiller/osf.io | website/oauth/models/__init__.py | 6 | 23632 | # -*- coding: utf-8 -*-
import abc
import logging
import datetime
import functools
import httplib as http
import time
import urlparse
import uuid
from flask import request
from django.utils import timezone
from oauthlib.oauth2.rfc6749.errors import MissingTokenError
from requests.exceptions import HTTPError as RequestsHTTPError
from modularodm import fields, Q
from modularodm.validators import MaxLengthValidator, URLValidator
from requests_oauthlib import OAuth1Session
from requests_oauthlib import OAuth2Session
from osf.exceptions import ValidationError
from framework.auth import cas
from framework.encryption import EncryptedStringField
from framework.exceptions import HTTPError, PermissionsError
from framework.mongo import ObjectId, StoredObject
from framework.mongo.utils import unique_on
from framework.mongo.validators import string_required
from framework.sessions import session
from website import settings
from website.oauth.utils import PROVIDER_LOOKUP
from website.security import random_string
from website.util import web_url_for, api_v2_url
logger = logging.getLogger(__name__)
OAUTH1 = 1
OAUTH2 = 2
generate_client_secret = functools.partial(random_string, length=40)
@unique_on(['provider', 'provider_id'])
class ExternalAccount(StoredObject):
    """An account on an external service.
    Note that this object is not and should not be aware of what other objects
    are associated with it. This is by design, and this object should be kept as
    thin as possible, containing only those fields that must be stored in the
    database.
    The ``provider`` field is a de facto foreign key to an ``ExternalProvider``
    object, as providers are not stored in the database.
    """
    _id = fields.StringField(default=lambda: str(ObjectId()), primary=True)
    # The OAuth credentials. One or both of these fields should be populated.
    # For OAuth1, this is usually the "oauth_token"
    # For OAuth2, this is usually the "access_token"
    oauth_key = EncryptedStringField()
    # For OAuth1, this is usually the "oauth_token_secret"
    # For OAuth2, this is not used
    oauth_secret = EncryptedStringField()
    # Used for OAuth2 only
    refresh_token = EncryptedStringField()
    # Timestamp of the last successful token refresh (see
    # ExternalProvider.refresh_oauth_key). NOTE(review): stored naive/UTC.
    date_last_refreshed = fields.DateTimeField()
    # When the current oauth_key expires (OAuth2 only).
    expires_at = fields.DateTimeField()
    # OAuth scopes granted to this account's token.
    scopes = fields.StringField(list=True, default=lambda: list())
    # The `name` of the service
    # This lets us query for only accounts on a particular provider
    provider = fields.StringField(required=True)
    # The proper 'name' of the service
    # Needed for account serialization
    provider_name = fields.StringField(required=True)
    # The unique, persistent ID on the remote service.
    provider_id = fields.StringField()
    # The user's name on the external service
    display_name = EncryptedStringField()
    # A link to the user's profile on the external service
    profile_url = EncryptedStringField()
    def __repr__(self):
        return '<ExternalAccount: {}/{}>'.format(self.provider,
                                                 self.provider_id)
class ExternalProviderMeta(abc.ABCMeta):
    """Metaclass that registers every concrete ``ExternalProvider`` subclass
    in ``PROVIDER_LOOKUP``, keyed by its ``short_name``."""
    def __init__(cls, name, bases, dct):
        super(ExternalProviderMeta, cls).__init__(name, bases, dct)
        # Abstract bases still have ``short_name`` as an abstractproperty;
        # only register classes that define a concrete short name.
        still_abstract = isinstance(cls.short_name, abc.abstractproperty)
        if not still_abstract:
            PROVIDER_LOOKUP[cls.short_name] = cls
class ExternalProvider(object):
    """A connection to an external service (ex: GitHub).
    This object contains no credentials, and is not saved in the database.
    It provides an unauthenticated session with the provider, unless ``account``
    has been set - in which case, it provides a connection authenticated as the
    ``ExternalAccount`` instance.
    Conceptually, this can be thought of as an extension of ``ExternalAccount``.
    It's a separate object because this must be subclassed for each provider,
    and ``ExternalAccount`` instances are stored within a single collection.
    """
    __metaclass__ = ExternalProviderMeta

    # Default to OAuth v2.0.
    _oauth_version = OAUTH2

    # Providers that have expiring tokens must override these
    auto_refresh_url = None
    refresh_time = 0  # When to refresh the oauth_key (seconds)
    expiry_time = 0  # If/When the refresh token expires (seconds). 0 indicates a non-expiring refresh token

    def __init__(self, account=None):
        super(ExternalProvider, self).__init__()
        # provide an unauthenticated session by default
        self.account = account

    def __repr__(self):
        return '<{name}: {status}>'.format(
            name=self.__class__.__name__,
            status=self.account.provider_id if self.account else 'anonymous'
        )

    @abc.abstractproperty
    def auth_url_base(self):
        """The base URL to begin the OAuth dance"""
        pass

    @property
    def auth_url(self):
        """The URL to begin the OAuth dance.
        This property method has side effects - it at least adds temporary
        information to the session so that callbacks can be associated with
        the correct user. For OAuth1, it calls the provider to obtain
        temporary credentials to start the flow.
        """
        # create a dict on the session object if it's not already there
        if session.data.get('oauth_states') is None:
            session.data['oauth_states'] = {}
        if self._oauth_version == OAUTH2:
            # build the URL
            oauth = OAuth2Session(
                self.client_id,
                redirect_uri=web_url_for('oauth_callback',
                                         service_name=self.short_name,
                                         _absolute=True),
                scope=self.default_scopes,
            )
            url, state = oauth.authorization_url(self.auth_url_base)
            # save state token to the session for confirmation in the callback
            session.data['oauth_states'][self.short_name] = {'state': state}
        elif self._oauth_version == OAUTH1:
            # get a request token
            oauth = OAuth1Session(
                client_key=self.client_id,
                client_secret=self.client_secret,
            )
            # request temporary credentials from the provider
            response = oauth.fetch_request_token(self.request_token_url)
            # store them in the session for use in the callback
            session.data['oauth_states'][self.short_name] = {
                'token': response.get('oauth_token'),
                'secret': response.get('oauth_token_secret'),
            }
            url = oauth.authorization_url(self.auth_url_base)
        return url

    @abc.abstractproperty
    def callback_url(self):
        """The provider URL to exchange the code for a token"""
        pass

    @abc.abstractproperty
    def client_id(self):
        """OAuth Client ID. a/k/a: Application ID"""
        pass

    @abc.abstractproperty
    def client_secret(self):
        """OAuth Client Secret. a/k/a: Application Secret, Application Key"""
        pass

    default_scopes = list()

    @abc.abstractproperty
    def name(self):
        """Human-readable name of the service. e.g.: ORCiD, GitHub"""
        pass

    @abc.abstractproperty
    def short_name(self):
        """Name of the service to be used internally. e.g.: orcid, github"""
        pass

    def auth_callback(self, user, **kwargs):
        """Exchange temporary credentials for permanent credentials
        This is called in the view that handles the user once they are returned
        to the OSF after authenticating on the external service.
        :param user: the OSF user to attach the resulting ExternalAccount to
        :return bool: True on success, False if the provider reported an error
        :raises PermissionsError: if the flow state doesn't match this session
        :raises HTTPError: 503 if the token exchange with the provider fails
        """
        if 'error' in request.args:
            return False
        # make sure the user has temporary credentials for this provider
        try:
            cached_credentials = session.data['oauth_states'][self.short_name]
        except KeyError:
            raise PermissionsError('OAuth flow not recognized.')
        if self._oauth_version == OAUTH1:
            request_token = request.args.get('oauth_token')
            # make sure this is the same user that started the flow
            if cached_credentials.get('token') != request_token:
                raise PermissionsError('Request token does not match')
            response = OAuth1Session(
                client_key=self.client_id,
                client_secret=self.client_secret,
                resource_owner_key=cached_credentials.get('token'),
                resource_owner_secret=cached_credentials.get('secret'),
                verifier=request.args.get('oauth_verifier'),
            ).fetch_access_token(self.callback_url)
        elif self._oauth_version == OAUTH2:
            state = request.args.get('state')
            # make sure this is the same user that started the flow
            if cached_credentials.get('state') != state:
                raise PermissionsError('Request token does not match')
            try:
                response = OAuth2Session(
                    self.client_id,
                    redirect_uri=web_url_for(
                        'oauth_callback',
                        service_name=self.short_name,
                        _absolute=True
                    ),
                ).fetch_token(
                    self.callback_url,
                    client_secret=self.client_secret,
                    code=request.args.get('code'),
                )
            except (MissingTokenError, RequestsHTTPError):
                raise HTTPError(http.SERVICE_UNAVAILABLE)
        # pre-set as many values as possible for the ``ExternalAccount``
        info = self._default_handle_callback(response)
        # call the hook for subclasses to parse values from the response
        info.update(self.handle_callback(response))
        return self._set_external_account(user, info)

    def _set_external_account(self, user, info):
        """Create or update the ExternalAccount for ``info`` and attach it to
        ``user``. Returns True."""
        try:
            # create a new ``ExternalAccount`` ...
            self.account = ExternalAccount(
                provider=self.short_name,
                provider_id=info['provider_id'],
                provider_name=self.name,
            )
            self.account.save()
        except ValidationError:
            # ... or get the old one
            self.account = ExternalAccount.find_one(
                Q('provider', 'eq', self.short_name) &
                Q('provider_id', 'eq', info['provider_id'])
            )
            assert self.account is not None
        # ensure that provider_name is correct
        self.account.provider_name = self.name
        # required
        self.account.oauth_key = info['key']
        # only for OAuth1
        self.account.oauth_secret = info.get('secret')
        # only for OAuth2
        self.account.expires_at = info.get('expires_at')
        self.account.refresh_token = info.get('refresh_token')
        # NOTE(review): naive UTC timestamp, while _needs_refresh compares
        # expires_at against timezone.now() — confirm the two stay consistent.
        self.account.date_last_refreshed = datetime.datetime.utcnow()
        # additional information
        self.account.display_name = info.get('display_name')
        self.account.profile_url = info.get('profile_url')
        self.account.save()
        # add it to the user's list of ``ExternalAccounts``
        if not user.external_accounts.filter(id=self.account.id).exists():
            user.external_accounts.add(self.account)
            user.save()
        return True

    def _default_handle_callback(self, data):
        """Parse as much out of the key exchange's response as possible.
        This should not be over-ridden in subclasses.
        :param data: token-exchange response from the provider
        :return dict: subset of {key, secret, scope, refresh_token, expires_at}
        """
        if self._oauth_version == OAUTH1:
            key = data.get('oauth_token')
            secret = data.get('oauth_token_secret')
            values = {}
            if key:
                values['key'] = key
            if secret:
                values['secret'] = secret
            return values
        elif self._oauth_version == OAUTH2:
            key = data.get('access_token')
            refresh_token = data.get('refresh_token')
            expires_at = data.get('expires_at')
            scopes = data.get('scope')
            values = {}
            if key:
                values['key'] = key
            if scopes:
                values['scope'] = scopes
            if refresh_token:
                values['refresh_token'] = refresh_token
            if expires_at:
                values['expires_at'] = datetime.datetime.fromtimestamp(
                    float(expires_at)
                )
            return values

    @abc.abstractmethod
    def handle_callback(self, response):
        """Hook for allowing subclasses to parse information from the callback.
        Subclasses should implement this method to provide `provider_id`
        and `profile_url`.
        Values provided by ``self._default_handle_callback`` can be over-ridden
        here as well, in the unexpected case that they are parsed incorrectly
        by default.
        :param response: The JSON returned by the provider during the exchange
        :return dict:
        """
        pass

    def refresh_oauth_key(self, force=False, extra=None, resp_auth_token_key='access_token',
                          resp_refresh_token_key='refresh_token', resp_expiry_fn=None):
        """Handles the refreshing of an oauth_key for account associated with this provider.
        Not all addons need to use this, as some do not have oauth_keys that expire.
        Subclasses must define the following for this functionality:
        `auto_refresh_url` - URL to use when refreshing tokens. Must use HTTPS
        `refresh_time` - Time (in seconds) that the oauth_key should be refreshed after.
                         Typically half the duration of validity. Cannot be 0.
        Providers may have different keywords in their response bodies, kwargs
        `resp_*_key` allow subclasses to override these if necessary.
        kwarg `resp_expiry_fn` allows subclasses to specify a function that will return the
        datetime-formatted oauth_key expiry key, given a successful refresh response from
        `auto_refresh_url`. A default using 'expires_at' as a key is provided.
        ``extra`` holds additional parameters for the token-refresh request; it
        defaults to None (previously a mutable ``{}`` default, which was mutated
        below and therefore shared across calls) and is copied before use so the
        caller's dict is never modified.
        :return bool: True if a refresh was performed, False otherwise
        """
        # Ensure this is an authenticated Provider that uses token refreshing
        if not (self.account and self.auto_refresh_url):
            return False
        # Ensure this Provider is for a valid addon
        if not (self.client_id and self.client_secret):
            return False
        # Ensure a refresh is needed
        if not (force or self._needs_refresh()):
            return False
        if self.has_expired_credentials and not force:
            return False
        # Copy so neither a shared default nor the caller's dict is mutated.
        extra = dict(extra) if extra else {}
        resp_expiry_fn = resp_expiry_fn or (lambda x: datetime.datetime.utcfromtimestamp(time.time() + float(x['expires_in'])))
        client = OAuth2Session(
            self.client_id,
            token={
                'access_token': self.account.oauth_key,
                'refresh_token': self.account.refresh_token,
                'token_type': 'Bearer',
                # force the session to consider the token expired
                'expires_in': '-30',
            }
        )
        extra.update({
            'client_id': self.client_id,
            'client_secret': self.client_secret
        })
        token = client.refresh_token(
            self.auto_refresh_url,
            **extra
        )
        self.account.oauth_key = token[resp_auth_token_key]
        self.account.refresh_token = token[resp_refresh_token_key]
        self.account.expires_at = resp_expiry_fn(token)
        self.account.date_last_refreshed = datetime.datetime.utcnow()
        self.account.save()
        return True

    def _needs_refresh(self):
        """Determines whether or not an associated ExternalAccount needs
        a oauth_key.
        return bool: True if needs_refresh
        """
        if self.refresh_time and self.account.expires_at:
            return (self.account.expires_at - timezone.now()).total_seconds() < self.refresh_time
        return False

    @property
    def has_expired_credentials(self):
        """Determines whether or not an associated ExternalAccount has
        expired credentials that can no longer be renewed
        return bool: True if cannot be refreshed
        """
        if self.expiry_time and self.account.expires_at:
            return (timezone.now() - self.account.expires_at).total_seconds() > self.expiry_time
        return False
class ApiOAuth2Scope(StoredObject):
    """
    Store information about recognized OAuth2 scopes. Only scopes registered under this database model can
    be requested by third parties.
    """
    # Primary key: a stringified Mongo ObjectId generated per document.
    _id = fields.StringField(primary=True,
                             default=lambda: str(ObjectId()))
    # Canonical scope name as requested by clients; must be unique.
    name = fields.StringField(unique=True, required=True, index=True)
    # Human-readable explanation of what granting this scope allows.
    description = fields.StringField(required=True)
    is_active = fields.BooleanField(default=True, index=True)  # TODO: Add mechanism to deactivate a scope?
class ApiOAuth2Application(StoredObject):
    """Registration and key for user-created OAuth API applications

    This collection is also used by CAS to create the master list of available applications.
    Any changes made to field names in this model must be echoed in the CAS implementation.
    """
    # Primary key: a stringified Mongo ObjectId generated per document.
    _id = fields.StringField(
        primary=True,
        default=lambda: str(ObjectId())
    )
    # Client ID and secret. Use separate ID field so ID format doesn't have to be restricted to database internals.
    client_id = fields.StringField(default=lambda: uuid.uuid4().hex,  # Not *guaranteed* unique, but very unlikely
                                   unique=True,
                                   index=True)
    client_secret = fields.StringField(default=generate_client_secret)
    is_active = fields.BooleanField(default=True,  # Set to False if application is deactivated
                                    index=True)
    owner = fields.ForeignField('User',
                                index=True,
                                required=True)
    # User-specified application descriptors
    name = fields.StringField(index=True, required=True, validate=[string_required, MaxLengthValidator(200)])
    description = fields.StringField(required=False, validate=MaxLengthValidator(1000))
    date_created = fields.DateTimeField(auto_now_add=True,
                                        editable=False)
    home_url = fields.StringField(required=True,
                                  validate=URLValidator())
    callback_url = fields.StringField(required=True,
                                      validate=URLValidator())
    def deactivate(self, save=False):
        """
        Deactivate an ApiOAuth2Application

        Does not delete the database record, but revokes all tokens and sets a flag that hides this instance from API

        :param bool save: if True, persist the deactivation immediately
        :return bool: True on success
        """
        client = cas.get_client()
        # Will raise a CasHttpError if deletion fails, which will also stop setting of active=False.
        resp = client.revoke_application_tokens(self.client_id, self.client_secret)  # noqa
        self.is_active = False
        if save:
            self.save()
        return True
    def reset_secret(self, save=False):
        """
        Reset the secret of an ApiOAuth2Application

        Revokes all tokens

        :param bool save: if True, persist the new secret immediately
        :return bool: True on success
        """
        client = cas.get_client()
        client.revoke_application_tokens(self.client_id, self.client_secret)
        self.client_secret = generate_client_secret()
        if save:
            self.save()
        return True
    @property
    def url(self):
        # Relative URL of this application's settings page.
        return '/settings/applications/{}/'.format(self.client_id)
    @property
    def absolute_url(self):
        # Fully-qualified settings-page URL on the configured domain.
        return urlparse.urljoin(settings.DOMAIN, self.url)
    # Properties used by Django and DRF "Links: self" field
    @property
    def absolute_api_v2_url(self):
        path = '/applications/{}/'.format(self.client_id)
        return api_v2_url(path)
    # used by django and DRF
    def get_absolute_url(self):
        return self.absolute_api_v2_url
class ApiOAuth2PersonalToken(StoredObject):
    """Information for user-created personal access tokens

    This collection is also used by CAS to create the master list of available tokens.
    Any changes made to field names in this model must be echoed in the CAS implementation.
    """
    # Primary key: a stringified Mongo ObjectId generated per document.
    _id = fields.StringField(primary=True,
                             default=lambda: str(ObjectId()))
    # Name of the field being `token_id` is a CAS requirement.
    # This is the actual value of the token that's used to authenticate
    token_id = fields.StringField(default=functools.partial(random_string, length=70),
                                  unique=True)
    owner = fields.ForeignField('User',
                                index=True,
                                required=True)
    # User-chosen label for this token.
    name = fields.StringField(required=True, index=True)
    # This field is a space delimited list of scopes, e.g. "osf.full_read osf.full_write"
    scopes = fields.StringField(required=True)
    is_active = fields.BooleanField(default=True, index=True)
    def deactivate(self, save=False):
        """
        Deactivate an ApiOAuth2PersonalToken

        Does not delete the database record, but hides this instance from API

        :param bool save: if True, persist the deactivation immediately
        :return bool: True on success
        """
        client = cas.get_client()
        # Will raise a CasHttpError if deletion fails for any reason other than the token
        # not yet being created. This will also stop setting of active=False.
        try:
            resp = client.revoke_tokens({'token': self.token_id})  # noqa
        except cas.CasHTTPError as e:
            if e.code == 400:
                pass  # Token hasn't been used yet, so not created in cas
            else:
                raise e
        self.is_active = False
        if save:
            self.save()
        return True
    @property
    def url(self):
        # Relative URL of this token's settings page.
        return '/settings/tokens/{}/'.format(self._id)
    @property
    def absolute_url(self):
        # Fully-qualified settings-page URL on the configured domain.
        return urlparse.urljoin(settings.DOMAIN, self.url)
    # Properties used by Django and DRF "Links: self" field
    @property
    def absolute_api_v2_url(self):
        path = '/tokens/{}/'.format(self._id)
        return api_v2_url(path)
    # used by django and DRF
    def get_absolute_url(self):
        return self.absolute_api_v2_url
class BasicAuthProviderMixin(object):
    """
    Mixin letting BasicAuth-backed storage providers plug into the
    OAuth-oriented storage providers framework by mapping the BasicAuth
    parameters (host, username, password) onto an ExternalAccount's
    oauth fields.

    The password here is kept decrypted by default.
    """
    def __init__(self, account=None, host=None, username=None, password=None):
        super(BasicAuthProviderMixin, self).__init__()
        if account:
            # An existing ExternalAccount takes precedence.
            self.account = account
        elif host and password and username:
            # Build a fresh ExternalAccount from the BasicAuth triple;
            # the host is normalized to lowercase everywhere it is stored.
            normalized_host = host.lower()
            self.account = ExternalAccount(
                display_name=username,
                oauth_key=password,
                oauth_secret=normalized_host,
                provider_id='{}:{}'.format(normalized_host, username),
                profile_url=normalized_host,
                provider=self.short_name,
                provider_name=self.name
            )
        else:
            self.account = None
    @property
    def host(self):
        """The provider host, stored in the account's profile_url."""
        return self.account.profile_url
    @property
    def username(self):
        """The BasicAuth username, stored as the account's display name."""
        return self.account.display_name
    @property
    def password(self):
        """The BasicAuth password, stored (decrypted) as the oauth key."""
        return self.account.oauth_key
| apache-2.0 |
albertjan/pypyjs-presentation | assets/js/pypy.js-0.3.1/lib/modules/commands.py | 264 | 2545 | """Execute shell commands via os.popen() and return status, output.
Interface summary:
import commands
outtext = commands.getoutput(cmd)
(exitstatus, outtext) = commands.getstatusoutput(cmd)
outtext = commands.getstatus(file) # returns output of "ls -ld file"
A trailing newline is removed from the output string.
Encapsulates the basic operation:
pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
text = pipe.read()
sts = pipe.close()
[Note: it would be nice to add functions to interpret the exit status.]
"""
# Warn at import time: this module was removed in Python 3 in favour of
# the subprocess module.
from warnings import warnpy3k
warnpy3k("the commands module has been removed in Python 3.0; "
         "use the subprocess module instead", stacklevel=2)
del warnpy3k
# Public API of this module.
__all__ = ["getstatusoutput","getoutput","getstatus"]
# Module 'commands'
#
# Various tools for executing commands and looking at their output and status.
#
# NB This only works (and is only relevant) for UNIX.
# Get 'ls -l' status for an object into a string
#
def getstatus(file):
    """Return output of "ls -ld <file>" in a string."""
    import warnings
    warnings.warn("commands.getstatus() is deprecated", DeprecationWarning, 2)
    # mkarg() supplies the leading space and shell quoting for the path.
    command = 'ls -ld' + mkarg(file)
    return getoutput(command)
# Get the output from a shell command into a string.
# The exit status is ignored; a trailing newline is stripped.
# Assume the command will work with '{ ... ; } 2>&1' around it..
#
def getoutput(cmd):
    """Return output (stdout or stderr) of executing cmd in a shell."""
    # Discard the exit status, keep only the captured text.
    _status, text = getstatusoutput(cmd)
    return text
# Ditto but preserving the exit status.
# Returns a pair (sts, output)
#
def getstatusoutput(cmd):
    """Return (status, output) of executing cmd in a shell."""
    import os
    # Wrap the command so stderr is folded into stdout.
    pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
    output = pipe.read()
    status = pipe.close()
    if status is None:
        # close() returns None on a zero exit status.
        status = 0
    # Strip a single trailing newline, if present.
    if output.endswith('\n'):
        output = output[:-1]
    return status, output
# Make command argument from directory and pathname (prefix space, add quotes).
#
def mk2arg(head, x):
    """Return a shell-quoted argument for the path head/x (space-prefixed)."""
    import os
    joined = os.path.join(head, x)
    return mkarg(joined)
# Make a shell command argument from a string.
# Return a string beginning with a space followed by a shell-quoted
# version of the argument.
# Two strategies: enclose in single quotes if it contains none;
# otherwise, enclose in double quotes and prefix quotable characters
# with backslash.
#
def mkarg(x):
if '\'' not in x:
return ' \'' + x + '\''
s = ' "'
for c in x:
if c in '\\$"`':
s = s + '\\'
s = s + c
s = s + '"'
return s
| unlicense |
jirutka/ngx-oauth | integration/support/nginx_server.py | 1 | 1260 | import os
from os import path
import shlex
from subprocess import Popen
from time import sleep
from .util import write_file
import requests
from requests import ConnectionError
from retry import retry
__all__ = ['NginxServer']
class NginxServer:
    """Manage an nginx process driven by a generated nginx.conf.

    The configuration text is written to ``temp_dir/nginx.conf`` at
    construction time; ``start()`` launches nginx and sanity-checks
    ``check_url``; ``stop()`` tears the process down.
    """
    def __init__(self, nginx_conf, check_url, temp_dir='.'):
        """
        :param nginx_conf: full text of the nginx configuration to write
        :param check_url: URL requested to verify the server is responding
        :param temp_dir: directory in which nginx.conf is written
        """
        conf_path = path.join(temp_dir, 'nginx.conf')
        write_file(conf_path, nginx_conf)
        self._command = "nginx -c %s" % conf_path
        self._ngx_process = None
        self.check_url = check_url
    def start(self):
        """Start nginx and verify it answers 200 on ``check_url``.

        :raises ConnectionError: if the server never becomes reachable
            (the process is stopped before re-raising).
        :raises IOError: if the check URL responds with a non-200 status.
        """
        self._ngx_process = Popen(shlex.split(self._command))
        try:  # sanity check
            resp = self._request_check_url()
        except ConnectionError as e:
            self.stop()
            raise e
        if resp.status_code != 200:
            raise IOError("Nginx returned %s for GET %s" % (resp.status_code, self.check_url))
    def stop(self):
        """Terminate the nginx process, escalating to SIGKILL if needed.

        Safe to call multiple times: the process handle is cleared after
        teardown, and SIGKILL is sent only if the process is still alive
        (unconditionally signalling an already-exited pid raises OSError).
        """
        if self._ngx_process is None:
            return
        try:
            self._ngx_process.terminate()
            sleep(0.2)
        finally:
            # Bug fix: the original always sent SIGKILL, which raises
            # OSError once terminate() has already reaped the process,
            # and it never cleared the handle, so a second stop() would
            # signal a stale (possibly reused) pid.
            if self._ngx_process.poll() is None:
                os.kill(self._ngx_process.pid, 9)
            self._ngx_process = None
    @retry(ConnectionError, tries=20, delay=0.1)
    def _request_check_url(self):
        # TLS verification disabled -- presumably the tests use a
        # self-signed certificate; confirm against the test fixtures.
        return requests.get(self.check_url, verify=False)
| mit |
PopCap/GameIdea | Engine/Source/ThirdParty/HTML5/emsdk/Win64/python/2.7.5.3_64bit/Lib/encodings/cp720.py | 417 | 13694 | """Python Character Mapping Codec cp720 generated on Windows:
Vista 6.0.6002 SP2 Multiprocessor Free with the command:
python Tools/unicode/genwincodec.py 720
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec: maps to/from cp720 via the charmap tables defined below.
    def encode(self,input,errors='strict'):
        """Encode a unicode string to cp720 bytes using encoding_table."""
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        """Decode cp720 bytes to unicode using decoding_table."""
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        """Encode a chunk; charmap encoding carries no state between calls."""
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        """Decode a chunk; charmap decoding carries no state between calls."""
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream-oriented writer: combines Codec's charmap encode with the
    # generic StreamWriter machinery; no extra behavior needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream-oriented reader: combines Codec's charmap decode with the
    # generic StreamReader machinery; no extra behavior needed.
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo registration entry for the cp720 codec."""
    return codecs.CodecInfo(
        name='cp720',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\x80'
u'\x81'
u'\xe9' # 0x82 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x83 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\x84'
u'\xe0' # 0x85 -> LATIN SMALL LETTER A WITH GRAVE
u'\x86'
u'\xe7' # 0x87 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x88 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x89 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x8A -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x8B -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x8C -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\x8d'
u'\x8e'
u'\x8f'
u'\x90'
u'\u0651' # 0x91 -> ARABIC SHADDA
u'\u0652' # 0x92 -> ARABIC SUKUN
u'\xf4' # 0x93 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xa4' # 0x94 -> CURRENCY SIGN
u'\u0640' # 0x95 -> ARABIC TATWEEL
u'\xfb' # 0x96 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x97 -> LATIN SMALL LETTER U WITH GRAVE
u'\u0621' # 0x98 -> ARABIC LETTER HAMZA
u'\u0622' # 0x99 -> ARABIC LETTER ALEF WITH MADDA ABOVE
u'\u0623' # 0x9A -> ARABIC LETTER ALEF WITH HAMZA ABOVE
u'\u0624' # 0x9B -> ARABIC LETTER WAW WITH HAMZA ABOVE
u'\xa3' # 0x9C -> POUND SIGN
u'\u0625' # 0x9D -> ARABIC LETTER ALEF WITH HAMZA BELOW
u'\u0626' # 0x9E -> ARABIC LETTER YEH WITH HAMZA ABOVE
u'\u0627' # 0x9F -> ARABIC LETTER ALEF
u'\u0628' # 0xA0 -> ARABIC LETTER BEH
u'\u0629' # 0xA1 -> ARABIC LETTER TEH MARBUTA
u'\u062a' # 0xA2 -> ARABIC LETTER TEH
u'\u062b' # 0xA3 -> ARABIC LETTER THEH
u'\u062c' # 0xA4 -> ARABIC LETTER JEEM
u'\u062d' # 0xA5 -> ARABIC LETTER HAH
u'\u062e' # 0xA6 -> ARABIC LETTER KHAH
u'\u062f' # 0xA7 -> ARABIC LETTER DAL
u'\u0630' # 0xA8 -> ARABIC LETTER THAL
u'\u0631' # 0xA9 -> ARABIC LETTER REH
u'\u0632' # 0xAA -> ARABIC LETTER ZAIN
u'\u0633' # 0xAB -> ARABIC LETTER SEEN
u'\u0634' # 0xAC -> ARABIC LETTER SHEEN
u'\u0635' # 0xAD -> ARABIC LETTER SAD
u'\xab' # 0xAE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xAF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0xB0 -> LIGHT SHADE
u'\u2592' # 0xB1 -> MEDIUM SHADE
u'\u2593' # 0xB2 -> DARK SHADE
u'\u2502' # 0xB3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0xB4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0xB5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0xB6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0xB7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0xB8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0xB9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0xBA -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0xBB -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0xBC -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0xBD -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0xBE -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0xBF -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0xC0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0xC1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0xC2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0xC3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0xC4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0xC5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0xC6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0xC7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0xC8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0xC9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0xCA -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0xCB -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0xCC -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0xCD -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0xCE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0xCF -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0xD0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0xD1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0xD2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0xD3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0xD4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0xD5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0xD6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0xD7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0xD8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0xD9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0xDA -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0xDB -> FULL BLOCK
u'\u2584' # 0xDC -> LOWER HALF BLOCK
u'\u258c' # 0xDD -> LEFT HALF BLOCK
u'\u2590' # 0xDE -> RIGHT HALF BLOCK
u'\u2580' # 0xDF -> UPPER HALF BLOCK
u'\u0636' # 0xE0 -> ARABIC LETTER DAD
u'\u0637' # 0xE1 -> ARABIC LETTER TAH
u'\u0638' # 0xE2 -> ARABIC LETTER ZAH
u'\u0639' # 0xE3 -> ARABIC LETTER AIN
u'\u063a' # 0xE4 -> ARABIC LETTER GHAIN
u'\u0641' # 0xE5 -> ARABIC LETTER FEH
u'\xb5' # 0xE6 -> MICRO SIGN
u'\u0642' # 0xE7 -> ARABIC LETTER QAF
u'\u0643' # 0xE8 -> ARABIC LETTER KAF
u'\u0644' # 0xE9 -> ARABIC LETTER LAM
u'\u0645' # 0xEA -> ARABIC LETTER MEEM
u'\u0646' # 0xEB -> ARABIC LETTER NOON
u'\u0647' # 0xEC -> ARABIC LETTER HEH
u'\u0648' # 0xED -> ARABIC LETTER WAW
u'\u0649' # 0xEE -> ARABIC LETTER ALEF MAKSURA
u'\u064a' # 0xEF -> ARABIC LETTER YEH
u'\u2261' # 0xF0 -> IDENTICAL TO
u'\u064b' # 0xF1 -> ARABIC FATHATAN
u'\u064c' # 0xF2 -> ARABIC DAMMATAN
u'\u064d' # 0xF3 -> ARABIC KASRATAN
u'\u064e' # 0xF4 -> ARABIC FATHA
u'\u064f' # 0xF5 -> ARABIC DAMMA
u'\u0650' # 0xF6 -> ARABIC KASRA
u'\u2248' # 0xF7 -> ALMOST EQUAL TO
u'\xb0' # 0xF8 -> DEGREE SIGN
u'\u2219' # 0xF9 -> BULLET OPERATOR
u'\xb7' # 0xFA -> MIDDLE DOT
u'\u221a' # 0xFB -> SQUARE ROOT
u'\u207f' # 0xFC -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0xFD -> SUPERSCRIPT TWO
u'\u25a0' # 0xFE -> BLACK SQUARE
u'\xa0' # 0xFF -> NO-BREAK SPACE
)
### Encoding table
# Built by inverting decoding_table: maps unicode code points back to
# cp720 byte values for charmap_encode.
encoding_table=codecs.charmap_build(decoding_table)
| bsd-2-clause |
Liyier/learning_log | env/Lib/site-packages/pip/compat/dictconfig.py | 921 | 23096 | # This is a copy of the Python logging.config.dictconfig module,
# reproduced with permission. It is provided here for backwards
# compatibility for Python versions prior to 2.7.
#
# Copyright 2009-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import absolute_import
import logging.handlers
import re
import sys
import types
from pip._vendor import six
# flake8: noqa
# Pattern for a legal Python identifier (case-insensitive).
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)


def valid_ident(s):
    """Return True if *s* is a valid Python identifier.

    :raises ValueError: if *s* does not match the identifier pattern.
    """
    if IDENTIFIER.match(s) is None:
        raise ValueError('Not a valid Python identifier: %r' % s)
    return True
#
# This function is defined in logging only in recent versions of Python
#
# Prefer the stdlib implementation when it exists (added to logging in
# later Python versions); otherwise fall back to an equivalent local copy.
try:
    from logging import _checkLevel
except ImportError:
    def _checkLevel(level):
        """Normalize *level* to an int; accepts an int or a known level name.

        :raises ValueError: if the string is not a registered level name.
        :raises TypeError: if *level* is neither an int nor a string.
        """
        if isinstance(level, int):
            rv = level
        elif str(level) == level:
            # String level: look the name up in logging's name table.
            if level not in logging._levelNames:
                raise ValueError('Unknown level: %r' % level)
            rv = logging._levelNames[level]
        else:
            raise TypeError('Level not an integer or a '
                            'valid string: %r' % level)
        return rv
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
    """A converting dictionary wrapper.

    Lookups route values through ``self.configurator.convert`` so nested
    dicts/lists/tuples and "ext://"/"cfg://" strings are converted lazily,
    and converted results are cached back into the dict.
    """
    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                # Record where the wrapper lives so it can resolve context.
                result.parent = self
                result.key = key
        return result
    def get(self, key, default=None):
        value = dict.get(self, key, default)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
    def pop(self, key, default=None):
        value = dict.pop(self, key, default)
        result = self.configurator.convert(value)
        if value is not result:
            # Popped, so nothing to cache back -- just link the wrapper.
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class ConvertingList(list):
    """A converting list wrapper.

    Indexing routes elements through ``self.configurator.convert`` and
    caches converted results back into the list.
    """
    def __getitem__(self, key):
        value = list.__getitem__(self, key)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                # Record where the wrapper lives so it can resolve context.
                result.parent = self
                result.key = key
        return result
    def pop(self, idx=-1):
        value = list.pop(self, idx)
        result = self.configurator.convert(value)
        if value is not result:
            # Popped, so nothing to cache back -- just link the wrapper.
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
        return result
class ConvertingTuple(tuple):
    """A converting tuple wrapper.

    Tuples are immutable, so converted values cannot be cached back;
    each access re-converts and links the wrapper to its parent.
    """
    def __getitem__(self, key):
        value = tuple.__getitem__(self, key)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.

    Provides the value-conversion machinery ("ext://" import resolution
    and "cfg://" config-path lookup) shared by concrete configurators.
    """
    # Matches "prefix://suffix" conversion strings.
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
    # Tokens of a cfg:// path: leading word, ".attr" steps, "[index]" steps.
    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')
    # Maps conversion prefixes to handler method names on this class.
    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }
    # We might want to use a different one, e.g. importlib
    importer = __import__
    def __init__(self, config):
        # Wrap the raw config dict so nested values are converted lazily.
        self.config = ConvertingDict(config)
        self.config.configurator = self
    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # Attribute missing: try importing the longer dotted
                    # path (it may be a submodule), then retry.
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            # Re-raise as ValueError, preserving cause/traceback info.
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            v.__cause__, v.__traceback__ = e, tb
            raise v
    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)
    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol."""
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            # Root of the path is a top-level key of the config dict.
            d = self.config[m.groups()[0]]
            # print d, rest
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    # ".attr" step: plain key lookup.
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            d = d[idx]
                        else:
                            try:
                                n = int(idx)  # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                # Container rejects int keys: fall back to str.
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        # rest should be empty
        return d
    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
                isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        elif isinstance(value, six.string_types): # str for py3k
            m = self.CONVERT_PATTERN.match(value)
            if m:
                d = m.groupdict()
                prefix = d['prefix']
                converter = self.value_converters.get(prefix, None)
                if converter:
                    suffix = d['suffix']
                    converter = getattr(self, converter)
                    value = converter(suffix)
        return value
    def configure_custom(self, config):
        """Configure an object with a user-supplied factory."""
        c = config.pop('()')
        # Resolve a dotted-path string factory; Py2 old-style classes are
        # callable despite lacking __call__, hence the ClassType check.
        if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict((k, config[k]) for k in config if valid_ident(k))
        result = c(**kwargs)
        if props:
            # "." entry holds attributes to set on the constructed object.
            for name, value in props.items():
                setattr(result, name, value)
        return result
    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
# incremental handler config only if handler name
# ties in to logging._handlers (Python 2.7)
if sys.version_info[:2] == (2, 7):
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(_checkLevel(level))
except StandardError as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except StandardError as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except StandardError as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except StandardError as e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except StandardError as e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except StandardError as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
# we don't want to lose the existing loggers,
# since other threads may have pointers to them.
# existing is set to contain all existing loggers,
# and as we go through the new configuration we
# remove any which are configured. At the end,
# what's left in existing is the set of loggers
# which were in the previous configuration but
# which are not in the new configuration.
root = logging.root
existing = list(root.manager.loggerDict)
# The list needs to be sorted so that we can
# avoid disabling child loggers of explicitly
# named loggers. With a sorted list it is easier
# to find the child loggers.
existing.sort()
# We'll keep the list of existing loggers
# which are children of named loggers here...
child_loggers = []
# now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name)
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
i = i + 1 # look at the entry after name
while (i < num_existing) and\
(existing[i][:pflen] == prefixed):
child_loggers.append(existing[i])
i = i + 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except StandardError as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
# Disable any old loggers. There's no point deleting
# them as other threads may continue to hold references
# and by disabling them, you stop them doing any logging.
# However, don't disable children of named loggers, as that's
# probably not what was intended by the user.
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
elif disable_existing:
logger.disabled = True
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except StandardError as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
    """Configure a formatter from a dictionary and return it.

    If the dict contains a '()' key the formatter is built by a
    user-supplied factory via configure_custom(); otherwise a plain
    logging.Formatter is created from the 'format'/'datefmt' entries.
    """
    if '()' in config:
        factory = config['()'] # for use in exception handler
        try:
            result = self.configure_custom(config)
        except TypeError as te:
            if "'format'" not in str(te):
                raise
            # Name of parameter changed from fmt to format.
            # Retry with old name.
            # This is so that code can be used with older Python versions
            # (e.g. by Django)
            config['fmt'] = config.pop('format')
            config['()'] = factory
            result = self.configure_custom(config)
    else:
        fmt = config.get('format', None)
        dfmt = config.get('datefmt', None)
        result = logging.Formatter(fmt, dfmt)
    return result
def configure_filter(self, config):
    """Configure a filter from a dictionary.

    A '()' key selects a user-supplied factory (handled by
    configure_custom); otherwise a plain logging.Filter is built from
    the optional 'name' entry.
    """
    if '()' not in config:
        return logging.Filter(config.get('name', ''))
    return self.configure_custom(config)
def add_filters(self, filterer, filters):
    """Add filters to a filterer from a list of names.

    Each name is looked up in the already-configured
    self.config['filters'] mapping; any lookup/attach failure is
    re-raised as ValueError naming the offending filter.
    """
    for f in filters:
        try:
            filterer.addFilter(self.config['filters'][f])
        # NOTE: StandardError is Python 2 only; this module targets Python 2.
        except StandardError as e:
            raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
    """Configure a handler from a dictionary and return it.

    'formatter', 'level' and 'filters' are popped first so that the
    remaining keys can be passed to the handler factory as kwargs.
    The factory is either the user-supplied '()' callable or the class
    resolved from the 'class' entry.
    """
    formatter = config.pop('formatter', None)
    if formatter:
        try:
            # Resolve the formatter by name from the already-built ones.
            formatter = self.config['formatters'][formatter]
        except StandardError as e:  # Python 2 only base class
            raise ValueError('Unable to set formatter '
                             '%r: %s' % (formatter, e))
    level = config.pop('level', None)
    filters = config.pop('filters', None)
    if '()' in config:
        c = config.pop('()')
        # If c is a string reference rather than a callable, resolve it.
        # The types.ClassType check accommodates Python 2 old-style classes.
        if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
            c = self.resolve(c)
        factory = c
    else:
        klass = self.resolve(config.pop('class'))
        # Special case for handler which refers to another handler
        if issubclass(klass, logging.handlers.MemoryHandler) and\
                'target' in config:
            try:
                config['target'] = self.config['handlers'][config['target']]
            except StandardError as e:
                raise ValueError('Unable to set target handler '
                                 '%r: %s' % (config['target'], e))
        elif issubclass(klass, logging.handlers.SMTPHandler) and\
                'mailhost' in config:
            config['mailhost'] = self.as_tuple(config['mailhost'])
        elif issubclass(klass, logging.handlers.SysLogHandler) and\
                'address' in config:
            config['address'] = self.as_tuple(config['address'])
        factory = klass
    # Only keys that are valid Python identifiers may become kwargs.
    kwargs = dict((k, config[k]) for k in config if valid_ident(k))
    try:
        result = factory(**kwargs)
    except TypeError as te:
        if "'stream'" not in str(te):
            raise
        # The argument name changed from strm to stream
        # Retry with old name.
        # This is so that code can be used with older Python versions
        # (e.g. by Django)
        kwargs['strm'] = kwargs.pop('stream')
        result = factory(**kwargs)
    if formatter:
        result.setFormatter(formatter)
    if level is not None:
        result.setLevel(_checkLevel(level))
    if filters:
        self.add_filters(result, filters)
    return result
def add_handlers(self, logger, handlers):
    """Add handlers to a logger from a list of names.

    Mirrors add_filters(): names are resolved against
    self.config['handlers'] and failures become ValueError.
    """
    for h in handlers:
        try:
            logger.addHandler(self.config['handlers'][h])
        # NOTE: StandardError is Python 2 only; this module targets Python 2.
        except StandardError as e:
            raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
    """
    Perform configuration which is common to root and non-root loggers.

    Sets the level if given, and — unless *incremental* — replaces the
    logger's existing handlers/filters with those named in *config*.
    """
    level = config.get('level', None)
    if level is not None:
        logger.setLevel(_checkLevel(level))
    if not incremental:
        # Remove any existing handlers (iterate a copy while mutating).
        for h in logger.handlers[:]:
            logger.removeHandler(h)
        handlers = config.get('handlers', None)
        if handlers:
            self.add_handlers(logger, handlers)
        filters = config.get('filters', None)
        if filters:
            self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
    """Configure the named (non-root) logger from *config*.

    Shared level/handler/filter logic lives in common_logger_config();
    only the 'propagate' flag is handled here.
    """
    logger = logging.getLogger(name)
    self.common_logger_config(logger, config, incremental)
    propagate = config.get('propagate')
    if propagate is not None:
        logger.propagate = propagate
def configure_root(self, config, incremental=False):
    """Apply *config* to the root logger.

    All the work is delegated to common_logger_config(); the root
    logger has no 'propagate' handling.
    """
    self.common_logger_config(logging.getLogger(), config, incremental)
# Hook allowing callers to substitute their own configurator class.
dictConfigClass = DictConfigurator

def dictConfig(config):
    """Configure logging using a dictionary."""
    dictConfigClass(config).configure()
| mit |
Adnn/django | django/utils/text.py | 308 | 14923 | from __future__ import unicode_literals
import re
import unicodedata
from gzip import GzipFile
from io import BytesIO
from django.utils import six
from django.utils.encoding import force_text
from django.utils.functional import SimpleLazyObject, allow_lazy
from django.utils.safestring import SafeText, mark_safe
from django.utils.six.moves import html_entities
from django.utils.translation import pgettext, ugettext as _, ugettext_lazy
if six.PY2:
# Import force_unicode even though this module doesn't use it, because some
# people rely on it being here.
from django.utils.encoding import force_unicode # NOQA
def capfirst(x):
    """Return *x* with its first character upper-cased.

    Falsy inputs ('' or None) are returned unchanged, matching the
    original short-circuit behaviour.
    """
    if not x:
        return x
    text = force_text(x)
    return text[0].upper() + text[1:]
capfirst = allow_lazy(capfirst, six.text_type)
# Set up regular expressions
re_words = re.compile(r'<.*?>|((?:\w[-\w]*|&.*?;)+)', re.U | re.S)  # group(1): a word/entity run; tags match ungrouped
re_chars = re.compile(r'<.*?>|(.)', re.U | re.S)  # group(1): a single non-tag char
re_tag = re.compile(r'<(/)?([^ ]+?)(?:(\s*/)| .*?)?>', re.S)  # groups: (closing slash, tag name, self-closing slash)
re_newlines = re.compile(r'\r\n|\r')  # Used in normalize_newlines
re_camel_case = re.compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')  # CamelCase word boundaries
def wrap(text, width):
    """
    A word-wrap function that preserves existing line breaks. Expects that
    existing line breaks are posix newlines.

    All white space is preserved except added line breaks consume the space on
    which they break the line.

    Long words are not wrapped, so the output text may have lines longer than
    ``width``.
    """
    text = force_text(text)

    def _generator():
        for line in text.splitlines(True):  # True keeps trailing linebreaks
            # Lines ending in '\n' get one extra column so the newline
            # itself does not trigger a wrap; min() caps it back at width.
            max_width = min((line.endswith('\n') and width + 1 or width), width)
            while len(line) > max_width:
                # Last space within the allowed width (+1 so a space at the
                # boundary is found); rfind returns -1 -> space == 0.
                space = line[:max_width + 1].rfind(' ') + 1
                if space == 0:
                    # No space within width: break at the first space at
                    # all, or emit the over-long word unwrapped.
                    space = line.find(' ') + 1
                    if space == 0:
                        yield line
                        line = ''
                        break
                yield '%s\n' % line[:space - 1]
                line = line[space:]
                max_width = min((line.endswith('\n') and width + 1 or width), width)
            if line:
                yield line
    return ''.join(_generator())
wrap = allow_lazy(wrap, six.text_type)
class Truncator(SimpleLazyObject):
    """
    An object used to truncate text, either by characters or words.
    """
    def __init__(self, text):
        # Store a lazy force_text() of the input so lazy strings work too;
        # SimpleLazyObject exposes the result as self._wrapped.
        super(Truncator, self).__init__(lambda: force_text(text))

    def add_truncation_text(self, text, truncate=None):
        """Append the truncation marker to *text*.

        *truncate* defaults to a translatable ellipsis and may contain a
        '%(truncated_text)s' placeholder for explicit placement.
        """
        if truncate is None:
            truncate = pgettext(
                'String to return when truncating text',
                '%(truncated_text)s...')
        truncate = force_text(truncate)
        if '%(truncated_text)s' in truncate:
            return truncate % {'truncated_text': text}
        # The truncation text didn't contain the %(truncated_text)s string
        # replacement argument so just append it to the text.
        if text.endswith(truncate):
            # But don't append the truncation text if the current text already
            # ends in this.
            return text
        return '%s%s' % (text, truncate)

    def chars(self, num, truncate=None, html=False):
        """
        Returns the text truncated to be no longer than the specified number
        of characters.

        Takes an optional argument of what should be used to notify that the
        string has been truncated, defaulting to a translatable string of an
        ellipsis (...).
        """
        length = int(num)
        # NFC-normalize so base+combining sequences compose where possible.
        text = unicodedata.normalize('NFC', self._wrapped)
        # Calculate the length to truncate to (max length - end_text length)
        truncate_len = length
        for char in self.add_truncation_text('', truncate):
            if not unicodedata.combining(char):
                truncate_len -= 1
                if truncate_len == 0:
                    break
        if html:
            return self._truncate_html(length, truncate, text, truncate_len, False)
        return self._text_chars(length, truncate, text, truncate_len)
    chars = allow_lazy(chars)

    def _text_chars(self, length, truncate, text, truncate_len):
        """
        Truncates a string after a certain number of chars.
        """
        s_len = 0
        end_index = None
        for i, char in enumerate(text):
            if unicodedata.combining(char):
                # Don't consider combining characters
                # as adding to the string length
                continue
            s_len += 1
            if end_index is None and s_len > truncate_len:
                end_index = i
            if s_len > length:
                # Return the truncated string
                return self.add_truncation_text(text[:end_index or 0],
                                                truncate)
        # Return the original string since no truncation was necessary
        return text

    def words(self, num, truncate=None, html=False):
        """
        Truncates a string after a certain number of words. Takes an optional
        argument of what should be used to notify that the string has been
        truncated, defaulting to ellipsis (...).
        """
        length = int(num)
        if html:
            return self._truncate_html(length, truncate, self._wrapped, length, True)
        return self._text_words(length, truncate)
    words = allow_lazy(words)

    def _text_words(self, length, truncate):
        """
        Truncates a string after a certain number of words.

        Newlines in the string will be stripped.
        """
        words = self._wrapped.split()
        if len(words) > length:
            words = words[:length]
            return self.add_truncation_text(' '.join(words), truncate)
        return ' '.join(words)

    def _truncate_html(self, length, truncate, text, truncate_len, words):
        """
        Truncates HTML to a certain number of chars (not counting tags and
        comments), or, if words is True, then to a certain number of words.
        Closes opened tags if they were correctly closed in the given HTML.

        Newlines in the HTML are preserved.
        """
        if words and length <= 0:
            return ''
        # HTML4 void elements: never pushed onto the open-tags stack.
        html4_singlets = (
            'br', 'col', 'link', 'base', 'img',
            'param', 'area', 'hr', 'input'
        )
        # Count non-HTML chars/words and keep note of open tags
        pos = 0
        end_text_pos = 0
        current_len = 0
        open_tags = []
        regex = re_words if words else re_chars
        while current_len <= length:
            m = regex.search(text, pos)
            if not m:
                # Checked through whole string
                break
            pos = m.end(0)
            if m.group(1):
                # It's an actual non-HTML word or char
                current_len += 1
                if current_len == truncate_len:
                    end_text_pos = pos
                continue
            # Check for tag
            tag = re_tag.match(m.group(0))
            if not tag or current_len >= truncate_len:
                # Don't worry about non tags or tags after our truncate point
                continue
            closing_tag, tagname, self_closing = tag.groups()
            # Element names are always case-insensitive
            tagname = tagname.lower()
            if self_closing or tagname in html4_singlets:
                pass
            elif closing_tag:
                # Check for match in open tags list
                try:
                    i = open_tags.index(tagname)
                except ValueError:
                    pass
                else:
                    # SGML: An end tag closes, back to the matching start tag,
                    # all unclosed intervening start tags with omitted end tags
                    open_tags = open_tags[i + 1:]
            else:
                # Add it to the start of the open tags list
                open_tags.insert(0, tagname)
        if current_len <= length:
            return text
        out = text[:end_text_pos]
        truncate_text = self.add_truncation_text('', truncate)
        if truncate_text:
            out += truncate_text
        # Close any tags still open
        for tag in open_tags:
            out += '</%s>' % tag
        # Return string
        return out
def get_valid_filename(s):
    """
    Returns the given string converted to a string that can be used for a clean
    filename. Specifically, leading and trailing spaces are removed; other
    spaces are converted to underscores; and anything that is not a unicode
    alphanumeric, dash, underscore, or dot, is removed.

    >>> get_valid_filename("john's portrait in 2004.jpg")
    'johns_portrait_in_2004.jpg'
    """
    cleaned = force_text(s).strip().replace(' ', '_')
    # Drop everything except unicode word chars, dashes and dots.
    return re.sub(r'(?u)[^-\w.]', '', cleaned)
get_valid_filename = allow_lazy(get_valid_filename, six.text_type)
def get_text_list(list_, last_word=ugettext_lazy('or')):
    """
    Join a list into a comma-separated string, with *last_word* before
    the final item.

    >>> get_text_list(['a', 'b', 'c', 'd'])
    'a, b, c or d'
    >>> get_text_list(['a', 'b', 'c'], 'and')
    'a, b and c'
    >>> get_text_list(['a', 'b'], 'and')
    'a and b'
    >>> get_text_list(['a'])
    'a'
    >>> get_text_list([])
    ''
    """
    if len(list_) == 0:
        return ''
    if len(list_) == 1:
        return force_text(list_[0])
    return '%s %s %s' % (
        # Translators: This string is used as a separator between list elements
        _(', ').join(force_text(i) for i in list_[:-1]),
        force_text(last_word), force_text(list_[-1]))
get_text_list = allow_lazy(get_text_list, six.text_type)
def normalize_newlines(text):
    """Normalize CRLF and bare CR line endings in *text* to LF."""
    return re_newlines.sub('\n', force_text(text))
normalize_newlines = allow_lazy(normalize_newlines, six.text_type)
def phone2numeric(phone):
    """Convert letters in a phone number to their keypad digits.

    Characters with no keypad mapping (digits, punctuation) pass
    through unchanged; input is lower-cased first.
    """
    keypad = {
        'a': '2', 'b': '2', 'c': '2',
        'd': '3', 'e': '3', 'f': '3',
        'g': '4', 'h': '4', 'i': '4',
        'j': '5', 'k': '5', 'l': '5',
        'm': '6', 'n': '6', 'o': '6',
        'p': '7', 'q': '7', 'r': '7', 's': '7',
        't': '8', 'u': '8', 'v': '8',
        'w': '9', 'x': '9', 'y': '9', 'z': '9',
    }
    return ''.join(keypad.get(ch, ch) for ch in phone.lower())
phone2numeric = allow_lazy(phone2numeric)
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
    """Gzip-compress the bytestring *s* and return the compressed bytes."""
    buf = BytesIO()
    with GzipFile(mode='wb', compresslevel=6, fileobj=buf) as zf:
        zf.write(s)
    return buf.getvalue()
class StreamingBuffer(object):
    """Minimal file-like sink whose contents are drained by read().

    write() appends chunks; read() returns everything written since the
    last read (emptying the buffer), or b'' when nothing is pending.
    flush()/close() are no-ops so GzipFile can treat it as a file.
    """
    def __init__(self):
        self.vals = []

    def write(self, val):
        self.vals.append(val)

    def read(self):
        if not self.vals:
            return b''
        data, self.vals = b''.join(self.vals), []
        return data

    def flush(self):
        return

    def close(self):
        return
# Like compress_string, but for iterators of strings.
def compress_sequence(sequence):
    """Lazily gzip-compress *sequence*, yielding compressed chunks as
    they become available (gzip header first, trailer last)."""
    buf = StreamingBuffer()
    gz = GzipFile(mode='wb', compresslevel=6, fileobj=buf)
    # Output headers...
    yield buf.read()
    for chunk in sequence:
        gz.write(chunk)
        compressed = buf.read()
        if compressed:
            yield compressed
    gz.close()
    yield buf.read()
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = re.compile(r"""
((?:
[^\s'"]*
(?:
(?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
[^\s'"]*
)+
) | \S+)
""", re.VERBOSE)
def smart_split(text):
    r"""
    Generator that splits a string by spaces, leaving quoted phrases together.
    Supports both single and double quotes, and supports escaping quotes with
    backslashes. In the output, strings will keep their initial and trailing
    quote marks and escaped quotes will remain escaped (the results can then
    be further processed with unescape_string_literal()).

    >>> list(smart_split(r'This is "a person\'s" test.'))
    ['This', 'is', '"a person\\\'s"', 'test.']
    >>> list(smart_split(r"Another 'person\'s' test."))
    ['Another', "'person\\'s'", 'test.']
    >>> list(smart_split(r'A "\"funky\" style" test.'))
    ['A', '"\\"funky\\" style"', 'test.']
    """
    for match in smart_split_re.finditer(force_text(text)):
        yield match.group(0)
def _replace_entity(match):
    # re.sub callback for _entity_re: decode one HTML entity reference
    # to its unicode character, leaving unrecognized entities untouched.
    text = match.group(1)
    if text[0] == '#':
        # Numeric entity: decimal, or hexadecimal with an x/X prefix.
        text = text[1:]
        try:
            if text[0] in 'xX':
                c = int(text[1:], 16)
            else:
                c = int(text)
            return six.unichr(c)
        except ValueError:
            # Malformed number: return the original entity text unchanged.
            return match.group(0)
    else:
        # Named entity, e.g. 'amp'; unknown names are left untouched.
        try:
            return six.unichr(html_entities.name2codepoint[text])
        except (ValueError, KeyError):
            return match.group(0)
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape_entities(text):
    # Replace every HTML entity reference in *text* with its character.
    return _entity_re.sub(_replace_entity, text)
unescape_entities = allow_lazy(unescape_entities, six.text_type)
def unescape_string_literal(s):
    r"""
    Convert quoted string literals to unquoted strings with escaped quotes and
    backslashes unquoted::

        >>> unescape_string_literal('"abc"')
        'abc'
        >>> unescape_string_literal("'abc'")
        'abc'
        >>> unescape_string_literal('"a \"bc\""')
        'a "bc"'
        >>> unescape_string_literal("'\'ab\' c'")
        "'ab' c"
    """
    quote = s[0]
    # Must start and end with the same (single or double) quote character.
    if quote not in "\"'" or s[-1] != quote:
        raise ValueError("Not a string literal: %r" % s)
    inner = s[1:-1]
    return inner.replace('\\' + quote, quote).replace('\\\\', '\\')
unescape_string_literal = allow_lazy(unescape_string_literal)
def slugify(value, allow_unicode=False):
    """
    Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
    Remove characters that aren't alphanumerics, underscores, or hyphens.
    Convert to lowercase. Also strip leading and trailing whitespace.
    """
    value = force_text(value)
    if allow_unicode:
        value = unicodedata.normalize('NFKC', value)
        # Raw strings: '\w'/'\s' in a plain literal are invalid escape
        # sequences (DeprecationWarning since Python 3.6).
        value = re.sub(r'[^\w\s-]', '', value, flags=re.U).strip().lower()
        return mark_safe(re.sub(r'[-\s]+', '-', value, flags=re.U))
    # ASCII path: decompose then drop non-ASCII code points entirely.
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    return mark_safe(re.sub(r'[-\s]+', '-', value))
slugify = allow_lazy(slugify, six.text_type, SafeText)
def camel_case_to_spaces(value):
    """
    Splits CamelCase and converts to lower case. Also strips leading and
    trailing whitespace.
    """
    spaced = re_camel_case.sub(r' \1', value)
    return spaced.strip().lower()
| bsd-3-clause |
supertree-toolkit/stk | stk/stk_import_export.py | 1 | 22750 | #!/usr/bin/env python
#
# Supertree Toolkit. Software for managing and manipulating sources
# trees ready for supretree construction.
# Copyright (C) 2011, Jon Hill, Katie Davis
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Jon Hill. jon.hill@imperial.ac.uk.
from StringIO import StringIO
import os
import sys
import math
import re
import numpy
from lxml import etree
import stk.nameparser.parser as np
import re
import supertree_toolkit
from copy import deepcopy
from supertree_toolkit import _parse_xml
import stk_exceptions
import stk.p4
import unicodedata
import string as python_string
def export_to_old(xml, output_dir, verbose=False, ignoreWarnings=False):
    """ Create an old STK dataset from a PHYML file. Hopefuly not useful
        in the long run as all functionality will be replicated, but may
        be useful in the short term.

        Creates <output_dir>/<project_name>/<source>/Tree_<n>/ directories,
        each holding a NEXUS tree file and an old-style XML metadata file.
        Raises STKImportExportError on any directory-creation failure.
    """
    if not ignoreWarnings:
        xml = supertree_toolkit.clean_data(xml)
    # Parse the file and away we go:
    xml_root = _parse_xml(xml)
    # First get project name and create the directory
    find = etree.XPath("//project_name")
    project_name = find(xml_root)[0].xpath("string_value")[0].text
    # NOTE(review): str.replace returns a new string — this result is
    # discarded, so spaces are NOT actually replaced; likely intended
    # project_name = project_name.replace(' ', '_').
    project_name.replace(' ','_')
    project_dir = os.path.join(output_dir,project_name)
    try:
        os.mkdir(project_dir)
    except OSError:
        msg = "Directory already exists. "
        msg += "Please check you are trying to output into the correct directory. If so remove "+project_dir
        raise stk_exceptions.STKImportExportError(msg)
    except:
        msg = "Error making project directory: "+os.path.join(output_dir,project_name)
        raise stk_exceptions.STKImportExportError(msg)
    # Loop through the sources
    find = etree.XPath("//source")
    find_trees = etree.XPath("//source_tree")
    sources = find(xml_root)
    for s in sources:
        # Make directory
        name = s.attrib['name']
        if (verbose):
            print "----\nWorking on:" +name
        if (name == '' or name == None):
            msg = "One of the sources does not have a valid name. Aborting."
            raise stk_exceptions.STKImportExportError(msg)
        source_dir = os.path.join(project_dir,name)
        os.mkdir(source_dir)
        # for this source, grab each tree_source and create the sub-directories
        tree_no = 1
        if (verbose):
            print "Found "+ str(len(s.xpath("source_tree"))) + " trees in this source"
        for t in s.xpath("source_tree"):
            tree_dir = os.path.join(source_dir,"Tree_"+str(tree_no))
            os.mkdir(tree_dir)
            # save the tree data: round-trip through p4 to normalise the
            # Newick string before writing the NEXUS file.
            tree = t.xpath("tree/tree_string/string_value")[0].text
            stk.p4.var.warnReadNoFile = False
            stk.p4.var.trees = []
            stk.p4.read(tree)
            stk.p4.var.warnReadNoFile = True
            trees = stk.p4.var.trees
            stk.p4.var.trees = []
            tree = trees[0].writeNewick(fName=None,toString=True).strip()
            out_tree_file = open(os.path.join(tree_dir,name+"_tree_"+str(tree_no)+".tre"),"w")
            out_tree_file.write('#NEXUS\nBEGIN TREES;\nTree tree_1 = [&u] ')
            out_tree_file.write(tree)
            out_tree_file.write("\nENDBLOCK;")
            out_tree_file.close()
            # create and save XML
            create_xml_metadata(etree.tostring(s), etree.tostring(t), os.path.join(tree_dir,name+"_tree_"+str(tree_no)))
            tree_no += 1
def import_old_data(input_dir, verbose=False):
    """ Converts an old STK dataset (based on directories) to the new PHYML
        file format. Note: we need XML files to get the meta data and also that
        the data imported may not be complete. It's up to the calling program
        to save the resulting xml string somewhere sensible.

        Raises STKImportExportError if no XML files are found under
        *input_dir*.
    """
    # strip trailing path separator if one
    if (input_dir.endswith(os.path.sep)):
        t = input_dir[0:-1]
        input_dir = t
    # Skeleton PHYML document that sources get appended into.
    base_xml = """<?xml version='1.0' encoding='utf-8'?>
<phylo_storage>
<project_name>
<string_value lines="1"/>
</project_name>
<sources>
</sources>
<history/>
</phylo_storage>"""
    xml_root = etree.fromstring(base_xml)
    find = etree.XPath("//sources")
    sources = find(xml_root)[0]
    # add the project name from the input directory
    xml_root.xpath("/phylo_storage/project_name/string_value")[0].text = os.path.basename(input_dir)
    # for each XML
    nXML = 0;
    for xml in locate('*.xml', input_dir):
        # parse XML
        if (verbose):
            print "Parsing: "+xml
        current_xml = etree.parse(xml)
        # convert into PHYML
        new_source = convert_to_phyml_source(current_xml)
        # This is now the source_tree portion of the XML
        source_tree = convert_to_phyml_sourcetree(current_xml, xml)
        # add into PHYML sources element
        append_to_source, already_in = supertree_toolkit.already_in_data(new_source,sources)
        if (not already_in):
            # append tree to current source
            new_source.append(deepcopy(source_tree))
            sources.append(deepcopy(new_source)) # deepcopy otherwise it'll add the same one several times :|
        else:
            # we need to find the correct source and append the source_tree to this
            append_to_source.append(deepcopy(source_tree))
        nXML += 1
    if (nXML == 0):
        msg = "Didn't find any XML files in this directory"
        raise stk_exceptions.STKImportExportError(msg)
    # create all sourcenames
    phyml = supertree_toolkit.all_sourcenames(etree.tostring(xml_root))
    phyml = supertree_toolkit.set_all_tree_names(phyml)
    return phyml
def locate(pattern, root=os.curdir):
    """Yield the full path of every file under *root* (searched
    recursively) whose basename matches the glob *pattern*.
    """
    import fnmatch
    base = os.path.abspath(root)
    for dirpath, _dirnames, filenames in os.walk(base):
        for name in fnmatch.filter(filenames, pattern):
            yield os.path.join(dirpath, name)
def convert_to_phyml_source(xml_root):
    """ Converts old STK XML to a new STK source XML block
        ready for insertion into a PHYML tree.

        Extracts the bibliographic fields from the old <Source> element
        and rebuilds them as a new-style <source> element.
    """
    # parse XML file and extract necessary info
    find = etree.XPath("//Source")
    Source = find(xml_root)[0]
    input_author = Source.xpath('Author')[0].text
    input_title = Source.xpath('Title')[0].text
    input_year = Source.xpath('Year')[0].text
    input_journal = Source.xpath('Journal')[0].text
    input_volume = Source.xpath('Volume')[0].text
    input_pages = Source.xpath('Pages')[0].text
    # NOTE(review): these three are extracted but never used below (the
    # book branch is currently dead — see contains_booktitle).
    input_booktitle = Source.xpath('Booktitle')[0].text
    input_editor = Source.xpath('Editor')[0].text
    input_publisher = Source.xpath('Publisher')[0].text
    author_list = []
    # Normalise the author string (Python 2 unicode -> ASCII) and split
    # the individual authors on ' and '.
    a = input_author.lower()
    if isinstance(a, unicode):
        a = unicodedata.normalize('NFKD', a).encode('ascii','ignore')
    author_list = a.split(' and ')
    if (len(author_list) == 0):
        author_list.append(input_author)
    phyml_root = etree.Element("source")
    publication = etree.SubElement(phyml_root,"bibliographic_information")
    # does it contain a booktitle?
    # NOTE(review): hard-coded False, so the <book> branch never runs.
    contains_booktitle = False
    if (contains_booktitle):
        article = etree.SubElement(publication,"book")
    else:
        article = etree.SubElement(publication,"article")
    authors = etree.SubElement(article,"authors")
    # now parse authors into something sensible
    # authors - parse into full author names, then use nameparse to extract first and last
    for a in author_list:
        # further munging of name: flip "Surname, First" to "First Surname"
        a = a.strip()
        bits = a.split(',')
        if (len(bits) > 1):
            a = bits[1].strip()+" "+bits[0].strip()
        o = np.HumanName(a)
        ae = etree.SubElement(authors,'author')
        surname = etree.SubElement(ae,'surname')
        string = etree.SubElement(surname,'string_value')
        string.attrib['lines'] = "1"
        string.text = python_string.capwords(o.last)
        if (o.last.capitalize() == ''):
            # Name parser failed to find a surname: fall back to the raw name.
            string.text = a
        first = etree.SubElement(ae,'other_names')
        string = etree.SubElement(first,'string_value')
        string.attrib['lines'] = "1"
        other = python_string.capwords(o.first)
        string.text = other
        # reset to empty if needed
        if (o.first == None):
            string.text = ''
    # title and the publication data
    title = etree.SubElement(article,"title")
    string = etree.SubElement(title,"string_value")
    string.attrib['lines'] = "1"
    string.text = input_title
    volume = etree.SubElement(article,"volume")
    string = etree.SubElement(volume,"string_value")
    string.attrib['lines'] = "1"
    string.text = input_volume
    year = etree.SubElement(article,"year")
    integer = etree.SubElement(year,"integer_value")
    integer.attrib['rank'] = "0"
    integer.text = input_year
    journal = etree.SubElement(article,"journal")
    string = etree.SubElement(journal,"string_value")
    string.attrib['lines'] = "1"
    string.text = input_journal
    pages = etree.SubElement(article,"pages")
    string = etree.SubElement(pages,"string_value")
    string.attrib['lines'] = "1"
    string.text = input_pages
    return phyml_root
def convert_to_phyml_sourcetree(input_xml, xml_file):
    """ Extract the source_tree data from the old-style XML
        and create an XML tree in the new style. We leave it to the
        main program to check whether we append or add the source.

        Returns the new <source_tree> element, or None if the tree file
        referenced by the XML could not be parsed.
    """
    # get tree filename from current_xml
    find_treefiles = etree.XPath('//TreeFile')
    treefile = find_treefiles(input_xml)[0].text
    # now stick on the root path of the XML to get the full path of the treefile
    cur_dir = os.path.split(xml_file)[0]
    try:
        tree = supertree_toolkit.import_tree(os.path.join(cur_dir,treefile))
    except stk_exceptions.TreeParseError as detail:
        msg = "***Error: failed to parse a tree in your data set.\n"
        msg += "File is: "+treefile+"\n"+detail.msg
        print msg
        return
    except IOError:
        # try just the file if we failed - windows formatted
        treefile = treefile.rsplit('\\')[-1]
        try:
            tree = supertree_toolkit.import_tree(os.path.join(cur_dir,treefile))
        except stk_exceptions.TreeParseError as detail:
            msg = "***Error: failed to parse a tree in your data set.\n"
            msg += "File is: "+treefile+"\n"+detail.msg
            print msg
            return
    # all other data
    find_mol = etree.XPath('//Characters/Molecular/Type')
    find_morph = etree.XPath('//Characters/Morphological/Type')
    find_behave = etree.XPath('//Characters/Behavioural/Type')
    find_other = etree.XPath('//Characters/Other/Type')
    # Map the old fossil attribute ("some"/"all"/"none") to three flags.
    # NOTE(review): 'mixed' is computed but never used below — the final
    # else branch of the taxa_data block plays that role instead.
    taxa_type = input_xml.xpath('/SourceTree/Taxa')[0].attrib['fossil']
    if (taxa_type == "some"):
        mixed = True
        allextant = False
        allfossil = False
    elif (taxa_type == "all"):
        mixed = False
        allextant = False
        allfossil = True
    elif (taxa_type == "none"):
        mixed = False
        allextant = True
        allfossil = False
    else:
        print "Unknown taxa types in "+xml_file
        print "Setting to mixed fossil and extant so you have to correct this later"
        mixed = True
        allextant = False
        allfossil = False
    # analysis
    input_comments = input_xml.xpath('/SourceTree/Notes')[0].text
    input_analysis = input_xml.xpath('/SourceTree/Analysis/Type')[0].text
    # Theres a translation to be done here
    if (input_analysis == "MP"):
        input_analysis = "Maximum Parsimony"
    if (input_analysis == "ML"):
        input_analysis = "Maximum Likelihood"
    # construct new XML
    source_tree = etree.Element("source_tree")
    # tree data
    tree_ele = etree.SubElement(source_tree,"tree")
    tree_string = etree.SubElement(tree_ele,"tree_string")
    string = etree.SubElement(tree_string,"string_value")
    string.attrib["lines"] = "1"
    string.text = tree
    # comment
    if (not input_comments == None):
        comment = etree.SubElement(tree_string,"comment")
        comment.text = input_comments
    # Figure and page number stuff; tails keep the serialised XML readable.
    figure_legend = etree.SubElement(tree_ele,"figure_legend")
    figure_legend.tail="\n "
    figure_legend_string = etree.SubElement(figure_legend,"string_value")
    figure_legend_string.tail="\n "
    figure_legend_string.attrib['lines'] = "1"
    figure_legend_string.text = "NA"
    figure_number = etree.SubElement(tree_ele,"figure_number")
    figure_number.tail="\n "
    figure_number_string = etree.SubElement(figure_number,"string_value")
    figure_number_string.tail="\n "
    figure_number_string.attrib['lines'] = "1"
    figure_number_string.text = "0"
    page_number = etree.SubElement(tree_ele,"page_number")
    page_number.tail="\n "
    page_number_string = etree.SubElement(page_number,"string_value")
    page_number_string.tail="\n "
    page_number_string.attrib['lines'] = "1"
    tree_inference = etree.SubElement(tree_ele,"tree_inference")
    optimality_criterion = etree.SubElement(tree_inference,"optimality_criterion")
    # analysis
    optimality_criterion.attrib['name'] = input_analysis
    # taxa data
    taxa_data = etree.SubElement(source_tree,"taxa_data")
    if (allfossil):
        taxa_type = etree.SubElement(taxa_data,"all_fossil")
    elif (allextant):
        taxa_type = etree.SubElement(taxa_data,"all_extant")
    else:
        taxa_type = etree.SubElement(taxa_data,"mixed_fossil_and_extant")
    # We *should* add a taxon here to make sure this is valid
    # phyml according to the schema. However, in doing so we will fail the
    # taxon check as we don't know which taxon (or taxa) is a fossil, as
    # this information is not recorded in the old STK XML files.
    # We therefore leave this commented out as a reminder to the
    # next soul to edit this
    #taxon = etree.SubElement(taxa_type,"taxon")
    character_data = etree.SubElement(source_tree,"character_data")
    # loop over characters add correctly
    chars = find_mol(input_xml)
    for c in chars:
        new_char = etree.SubElement(character_data,"character")
        new_char.attrib['type'] = "molecular"
        new_char.attrib['name'] = c.text
    chars = find_morph(input_xml)
    for c in chars:
        new_char = etree.SubElement(character_data,"character")
        new_char.attrib['type'] = "morphological"
        new_char.attrib['name'] = c.text
    chars = find_behave(input_xml)
    for c in chars:
        new_char = etree.SubElement(character_data,"character")
        new_char.attrib['type'] = "behavioural"
        new_char.attrib['name'] = c.text
    chars = find_other(input_xml)
    for c in chars:
        new_char = etree.SubElement(character_data,"character")
        new_char.attrib['type'] = "other"
        new_char.attrib['name'] = c.text
    return source_tree
def create_xml_metadata(XML_string, this_source, filename):
    """Convert a PHYML source block to an old-style STK XML file.

    Parameters:
        XML_string  -- full PHYML document as a string; supplies the
                       publication data (authors, year, title, ...).
        this_source -- a single PHYML <source_tree> block as a string;
                       supplies taxa, character and analysis data.
        filename    -- base path (no extension); the tree is read from
                       filename+'.tre' and the result written to
                       filename+'.xml'.

    Raises:
        stk_exceptions.TreeParseError -- if the tree file cannot be parsed.
    """
    XML = etree.fromstring(XML_string)
    source_XML = etree.fromstring(this_source)
    # from file name we can construct new tree object
    try:
        stk.p4.var.warnReadNoFile = False
        stk.p4.var.trees = []
        stk.p4.read(filename+'.tre')
        stk.p4.var.warnReadNoFile = True
    except Exception:
        # narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as a parse error
        raise stk_exceptions.TreeParseError("Error parsing " + filename)
    trees = stk.p4.var.trees
    stk.p4.var.trees = []
    tree = trees[0]
    taxa_list = tree.getAllLeafNames(0)
    new_xml = etree.Element("SourceTree")
    # The source publication info
    source = etree.SubElement(new_xml,"Source")
    author = etree.SubElement(source,"Author")
    find_authors = etree.XPath("//author")
    authors = find_authors(XML)
    # build the "Surname, O. and Surname, O." style author string
    authors_list = ''
    for a in authors:
        s = a.xpath('surname/string_value')[0].text
        o = ''
        try:
            o = a.xpath('other_names/string_value')[0].text
        except IndexError:
            # author has no other_names element recorded
            pass
        if authors_list != '':
            authors_list = authors_list+" and "
        authors_list += s
        # "if o" also skips a None .text (empty element), which previously
        # crashed the string concatenation below
        if o:
            authors_list += ", "+o+"."
    author.text = authors_list
    year = etree.SubElement(source,"Year")
    year.text = XML.xpath("//year/integer_value")[0].text
    title = etree.SubElement(source,"Title")
    title.text = XML.xpath("//title/string_value")[0].text
    # the remaining publication fields are optional in the PHYML
    journal = etree.SubElement(source,"Journal")
    if len(XML.xpath("//journal/string_value")) > 0:
        journal.text = XML.xpath("//journal/string_value")[0].text
    volume = etree.SubElement(source,"Volume")
    if len(XML.xpath("//volume/string_value")) > 0:
        volume.text = XML.xpath("//volume/string_value")[0].text
    book = etree.SubElement(source,"Booktitle")
    if len(XML.xpath("//booktitle/string_value")) > 0:
        book.text = XML.xpath("//booktitle/string_value")[0].text
    page = etree.SubElement(source,"Pages")
    if len(XML.xpath("//pages/string_value")) > 0:
        tmp_txt = XML.xpath("//pages/string_value")[0].text
        if tmp_txt is not None:
            # normalise en-dash page ranges to plain hyphens
            tmp_txt = tmp_txt.replace("–","-")
        else:
            tmp_txt = ""
        page.text = tmp_txt
    editor = etree.SubElement(source,"Editor")
    find_editors = etree.XPath("//editor/surname")
    surnames = find_editors(XML)
    authors_list = ''
    for s in surnames:
        if authors_list != '':
            authors_list = authors_list+" and "
        authors_list += s.xpath('string_value')[0].text
    editor.text = authors_list
    publisher = etree.SubElement(source, "Publisher")
    if len(XML.xpath("//publisher/string_value")) > 0:
        publisher.text = XML.xpath("//publisher/string_value")[0].text
    # The taxa info
    taxa = etree.SubElement(new_xml,"Taxa")
    # add List for the number of taxa
    for t in taxa_list:
        l = etree.SubElement(taxa, "List")
        l.text = t.replace('_',' ')
    # if we find any taxa with fossil switched on, then add fossil attribute
    find_fossil = etree.XPath("//fossil")
    if len(find_fossil(source_XML)) == 0:
        taxa.attrib['fossil'] = 'none'
    elif len(find_fossil(source_XML)) == len(taxa_list):
        taxa.attrib['fossil'] = 'all'
    else:
        taxa.attrib['fossil'] = 'some'
    taxa.attrib['number'] = str(len(taxa_list))
    # character data: bucket each PHYML character by its type attribute
    character = etree.SubElement(new_xml,"Characters")
    find_characters = etree.XPath("//character")
    characters_phyml = find_characters(source_XML)
    nMolecular = 0
    nMorpho = 0
    nBehaviour = 0
    nOther = 0
    molecular = etree.SubElement(character,"Molecular")
    morphological = etree.SubElement(character,"Morphological")
    behavioural = etree.SubElement(character,"Behavioural")
    other = etree.SubElement(character,"Other")
    for c in characters_phyml:
        if c.attrib['type'] == 'molecular':
            l = etree.SubElement(molecular,"Type")
            l.text = c.attrib['name']
            nMolecular += 1
        if c.attrib['type'] == 'behavioural':
            l = etree.SubElement(behavioural,"Type")
            l.text = c.attrib['name']
            nBehaviour += 1
        if c.attrib['type'] == 'morphological':
            l = etree.SubElement(morphological,"Type")
            l.text = c.attrib['name']
            nMorpho += 1
        if c.attrib['type'] == 'other':
            l = etree.SubElement(other,"Type")
            l.text = c.attrib['name']
            # BUG FIX: was "nOther += 0", so the Other count stayed at zero
            # and the 'number' attribute below was never written
            nOther += 1
    if nMolecular > 0:
        molecular.attrib['number'] = str(nMolecular)
    if nBehaviour > 0:
        behavioural.attrib['number'] = str(nBehaviour)
    if nMorpho > 0:
        morphological.attrib['number'] = str(nMorpho)
    if nOther > 0:
        other.attrib['number'] = str(nOther)
    # analysis data
    analysis = etree.SubElement(new_xml,"Analysis")
    find_analysis = etree.XPath("//analysis")
    analysis_phyml = find_analysis(source_XML)
    for a in analysis_phyml:
        l = etree.SubElement(analysis,"Type")
        l.text = a.attrib['name']
    # tree file - same directory :)
    tree_f = etree.SubElement(new_xml,"TreeFile")
    tree_file_only = os.path.basename(filename)
    tree_file_only += '.tre'
    tree_f.text = tree_file_only
    # Grab any comments under the tree and add them here
    notes = etree.SubElement(new_xml,'Notes')
    find_comments = etree.XPath("//comment")
    comments_phyml = find_comments(source_XML)
    comments = ""
    for c in comments_phyml:
        if c.text is not None:
            if comments != "":
                # BUG FIX: previously assigned "\n"+c.text here, discarding
                # all earlier comments instead of appending to them
                comments += "\n" + c.text
            else:
                comments += c.text
    notes.text = comments
    xml_string = etree.tostring(new_xml, encoding='iso-8859-1', pretty_print=True)
    # context manager closes the file even if the write fails
    with open(filename+'.xml','w') as f:
        f.write(xml_string)
#def _capitalise_source_name(name):
#    """Capitalises a source name, taking into account etal
#    smith_jones_2003 -> Smith_Jones_2003
#    smith_etal_2003  -> Smith_etal_2003
#    etc
#    """
| gpl-3.0 |
hpfem/agros2d | resources/python/pylint/checkers/newstyle.py | 4 | 4489 | # Copyright (c) 2005-2006 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""check for new / old style related problems
"""
from logilab import astng
from pylint.interfaces import IASTNGChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
# Message table for this checker: maps message id to a
# (template, symbolic-name, description) triple. E1xxx ids are errors,
# W1xxx ids are warnings.
MSGS = {
    'E1001': ('Use of __slots__ on an old style class',
              'slots-on-old-class',
              'Used when an old style class uses the __slots__ attribute.'),
    'E1002': ('Use of super on an old style class',
              'super-on-old-class',
              'Used when an old style class uses the super builtin.'),
    'E1003': ('Bad first argument %r given to super class',
              'bad-super-call',
              'Used when another argument than the current class is given as \
first argument of the super builtin.'),
    'W1001': ('Use of "property" on an old style class',
              'property-on-old-class',
              'Used when PyLint detect the use of the builtin "property" \
on an old style class while this is relying on new style \
classes features'),
    }
class NewStyleConflictChecker(BaseChecker):
    """checks for usage of new style capabilities on old style classes and
    other new/old styles conflicts problems
    * use of property, __slots__, super
    * "super" usage
    """

    __implements__ = (IASTNGChecker,)

    # configuration section name
    name = 'newstyle'
    # messages
    msgs = MSGS
    priority = -2
    # configuration options
    options = ()

    @check_messages('E1001')
    def visit_class(self, node):
        """check __slots__ usage
        """
        # __slots__ only has an effect on new-style classes; flag it on
        # old-style class bodies (E1001)
        if '__slots__' in node and not node.newstyle:
            self.add_message('E1001', node=node)

    @check_messages('W1001')
    def visit_callfunc(self, node):
        """check property usage"""
        # only a bare name call (not attribute access) directly inside an
        # old-style class body is flagged (W1001)
        parent = node.parent.frame()
        if (isinstance(parent, astng.Class) and
            not parent.newstyle and
            isinstance(node.func, astng.Name)):
            name = node.func.name
            if name == 'property':
                self.add_message('W1001', node=node)

    @check_messages('E1002', 'E1003')
    def visit_function(self, node):
        """check use of super"""
        # ignore actual functions or method within a new style class
        if not node.is_method():
            return
        klass = node.parent.frame()
        # scan the method body for calls of the form super(...).something(...)
        for stmt in node.nodes_of_class(astng.CallFunc):
            expr = stmt.func
            if not isinstance(expr, astng.Getattr):
                continue
            call = expr.expr
            # skip the test if using super
            if isinstance(call, astng.CallFunc) and \
               isinstance(call.func, astng.Name) and \
               call.func.name == 'super':
                if not klass.newstyle:
                    # super should not be used on an old style class
                    self.add_message('E1002', node=node)
                else:
                    # super first arg should be the class
                    try:
                        # .infer() yields candidate values; .next() (Python 2
                        # iterator protocol) takes the first one.
                        # NOTE(review): the "and ... or None" idiom also maps
                        # a *falsy* inferred value to None — presumably
                        # intentional shorthand for "first arg or None";
                        # confirm before restructuring.
                        supcls = (call.args and call.args[0].infer().next()
                                  or None)
                    except astng.InferenceError:
                        continue
                    if klass is not supcls:
                        # report the offending class name when available,
                        # otherwise the inferred object itself
                        supcls = getattr(supcls, 'name', supcls)
                        self.add_message('E1003', node=node, args=supcls)
def register(linter):
    """Auto-registration hook: attach this checker to *linter*."""
    checker = NewStyleConflictChecker(linter)
    linter.register_checker(checker)
| gpl-2.0 |
pronobis/rocs | thirdparty/gtest-1.6.0/test/gtest_test_utils.py | 397 | 10437 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest

# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
  import subprocess
  _SUBPROCESS_MODULE_AVAILABLE = True
except ImportError:
  # FIX: was a bare "except:", which would also swallow unrelated errors
  # (e.g. KeyboardInterrupt) raised during the import; only an absent
  # subprocess module (Python < 2.4) should trigger the popen2 fallback.
  import popen2
  _SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204

GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'

IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]

# Here we expose a class from a particular module, depending on the
# environment.  The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase  # pylint: disable-msg=C6409

# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
             'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
  """Parses and strips Google Test flags from argv.  This is idempotent."""

  # Only the first call does any work; every later call is a no-op.
  global _gtest_flags_are_parsed  # pylint: disable-msg=W0603
  if _gtest_flags_are_parsed:
    return
  _gtest_flags_are_parsed = True

  for flag in _flag_map:
    # An environment variable overrides the built-in default...
    env_name = flag.upper()
    if env_name in os.environ:
      _flag_map[flag] = os.environ[env_name]

    # ...and an explicit command line flag overrides both.
    prefix = '--' + flag + '='
    index = 1  # Position 0 holds the program name; start after it.
    while index < len(argv):
      if argv[index].startswith(prefix):
        _flag_map[flag] = argv[index][len(prefix):]
        del argv[index]
        break
      # Only advance when nothing was removed, so no argument is skipped.
      index += 1
def GetFlag(flag):
  """Returns the current value of the named flag."""

  # GetFlag() may run before Main(); parsing is idempotent, so always
  # trigger it here to guarantee the --gtest_* flags have been consumed.
  _ParseAndStripGTestFlags(sys.argv)
  return _flag_map[flag]
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""
  source_dir = GetFlag('source_dir')
  return os.path.abspath(source_dir)
def GetBuildDir():
  """Returns the absolute path of the directory where the test binaries are."""
  build_dir = GetFlag('build_dir')
  return os.path.abspath(build_dir)
# Lazily-created scratch directory shared via GetTempDir(); cleaned up at exit.
_temp_dir = None

def _RemoveTempDir():
  """Deletes the scratch directory if it was ever created; errors ignored."""
  if not _temp_dir:
    return
  shutil.rmtree(_temp_dir, ignore_errors=True)

atexit.register(_RemoveTempDir)
def GetTempDir():
  """Returns the shared temporary directory, creating it on first use."""

  global _temp_dir
  if not _temp_dir:
    # One directory per process; _RemoveTempDir() deletes it at exit.
    _temp_dir = tempfile.mkdtemp()
  return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting file
  doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.
    build_dir:       directory where to look for executables, by default
                     the result of GetBuildDir().

  Returns:
    The absolute path of the test binary.
  """

  path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
                                      executable_name))
  # Windows and Cygwin binaries carry an .exe suffix that callers omit.
  if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
    path += '.exe'

  if not os.path.exists(path):
    message = (
        'Unable to find the test binary. Please make sure to provide path\n'
        'to the binary via the --build_dir flag or the BUILD_DIR\n'
        'environment variable.')
    # FIX: was "print >> sys.stderr, message" — Python-2-only syntax.
    # sys.stderr.write() produces byte-identical output and also works
    # under Python 3.
    sys.stderr.write(message + '\n')
    sys.exit(1)

  return path
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """

  if os.name == 'nt':
    # Windows: os.system() hands back the exit() argument directly, and
    # os.WEXITSTATUS() is unavailable there.
    return exit_code

  # Unix: os.system() returns an encoded wait status; decode it.
  if not os.WIFEXITED(exit_code):
    return -1
  return os.WEXITSTATUS(exit_code)
class Subprocess:
  def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
    """Changes into a specified directory, if provided, and executes a command.

    Restores the old directory afterwards.

    Args:
      command:        The command to run, in the form of sys.argv.
      working_dir:    The directory to change into.
      capture_stderr: Determines whether to capture stderr in the output member
                      or to discard it.
      env:            Dictionary with environment to pass to the subprocess.

    Returns:
      An object that represents outcome of the executed process. It has the
      following attributes:
        terminated_by_signal   True iff the child process has been terminated
                               by a signal.
        signal                 Sygnal that terminated the child process.
        exited                 True iff the child process exited normally.
        exit_code              The code with which the child process exited.
        output                 Child process's stdout and stderr output
                               combined in a string.
    """

    # The subprocess module is the preferrable way of running programs
    # since it is available and behaves consistently on all platforms,
    # including Windows.  But it is only available starting in python 2.4.
    # In earlier python versions, we revert to the popen2 module, which is
    # available in python 2.0 and later but doesn't provide required
    # functionality (Popen4) under Windows.  This allows us to support Mac
    # OS X 10.4 Tiger, which has python 2.3 installed.
    if _SUBPROCESS_MODULE_AVAILABLE:
      if capture_stderr:
        stderr = subprocess.STDOUT
      else:
        stderr = subprocess.PIPE

      p = subprocess.Popen(command,
                           stdout=subprocess.PIPE, stderr=stderr,
                           cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file obect for the child's
      # output.
      self.output = p.communicate()[0]
      self._return_code = p.returncode
    else:
      old_dir = os.getcwd()

      def _ReplaceEnvDict(dest, src):
        # Changes made by os.environ.clear are not inheritable by child
        # processes until Python 2.6. To produce inheritable changes we have
        # to delete environment items with the del statement.
        # FIX: iterate over a snapshot of the keys — deleting entries while
        # iterating the dict itself raises RuntimeError.
        for key in list(dest):
          del dest[key]
        dest.update(src)

      # When 'env' is not None, backup the environment variables and replace
      # them with the passed 'env'. When 'env' is None, we simply use the
      # current 'os.environ' for compatibility with the subprocess.Popen
      # semantics used above.
      if env is not None:
        old_environ = os.environ.copy()
        _ReplaceEnvDict(os.environ, env)

      try:
        if working_dir is not None:
          os.chdir(working_dir)
        if capture_stderr:
          p = popen2.Popen4(command)
        else:
          p = popen2.Popen3(command)
        p.tochild.close()
        self.output = p.fromchild.read()
        ret_code = p.wait()
      finally:
        os.chdir(old_dir)

        # Restore the old environment variables
        # if they were replaced.
        if env is not None:
          _ReplaceEnvDict(os.environ, old_environ)

      # Converts ret_code to match the semantics of
      # subprocess.Popen.returncode.
      if os.WIFSIGNALED(ret_code):
        self._return_code = -os.WTERMSIG(ret_code)
      else:  # os.WIFEXITED(ret_code) should return True here.
        self._return_code = os.WEXITSTATUS(ret_code)

    # Negative return codes (subprocess convention) mean "killed by signal".
    if self._return_code < 0:
      self.terminated_by_signal = True
      self.exited = False
      self.signal = -self._return_code
    else:
      self.terminated_by_signal = False
      self.exited = True
      self.exit_code = self._return_code
def Main():
  """Runs the unit test."""

  # Strip the --gtest_* flags before handing argv to unittest.main(),
  # which would otherwise be confused by flags it does not recognize.
  _ParseAndStripGTestFlags(sys.argv)

  # The tested binaries should not be writing XML output files unless a
  # test explicitly instructs them to, so drop any inherited setting.
  # TODO(vladl@google.com): Move this into Subprocess when we implement
  # passing environment into it as a parameter.
  if GTEST_OUTPUT_VAR_NAME in os.environ:
    del os.environ[GTEST_OUTPUT_VAR_NAME]

  _test_module.main()
| bsd-2-clause |
divio/askbot-devel | askbot/views/writers.py | 1 | 39556 | # encoding:utf-8
"""
:synopsis: views diplaying and processing main content post forms
This module contains views that allow adding, editing, and deleting main textual content.
"""
import datetime
import logging
import os
import os.path
import random
import sys
import tempfile
import time
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseForbidden
from django.http import HttpResponseRedirect
from django.http import Http404
from django.utils import simplejson
from django.utils.html import strip_tags, escape
from django.utils.translation import get_language
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.core.urlresolvers import reverse
from django.core import exceptions
from django.conf import settings
from django.views.decorators import csrf
from django.contrib.auth.models import User
from askbot import exceptions as askbot_exceptions
from askbot import forms
from askbot import models
from askbot import signals
from askbot.conf import settings as askbot_settings
from askbot.utils import decorators
from askbot.utils.forms import format_errors
from askbot.utils.functions import diff_date
from askbot.utils import url_utils
from askbot.utils.file_utils import store_file
from askbot.utils.loading import load_module
from askbot.views import context
from askbot.templatetags import extra_filters_jinja as template_filters
from askbot.importers.stackexchange import management as stackexchange#todo: may change
from askbot.utils.slug import slugify
from recaptcha_works.decorators import fix_recaptcha_remote_ip
# Pagination sizes, per page type.
# used in index page
INDEX_PAGE_SIZE = 20
INDEX_AWARD_SIZE = 15
INDEX_TAGS_SIZE = 100
# used in tags list
DEFAULT_PAGE_SIZE = 60
# used in questions
QUESTIONS_PAGE_SIZE = 10
# used in answers
ANSWERS_PAGE_SIZE = 10
#todo: make this work with csrf
@csrf.csrf_exempt
def upload(request):#ajax upload file to a question or answer
    """view that handles file upload via Ajax

    Always responds with HTTP 200 and a small XML document:
    <result><msg>, <error>, <file_url> and <orig_file_name> — the client
    inspects <error> rather than the HTTP status code.
    """
    # check upload permission
    result = ''
    error = ''
    new_file_name = ''
    try:
        #may raise exceptions.PermissionDenied
        result, error, file_url, orig_file_name = None, '', None, None
        if request.user.is_anonymous():
            msg = _('Sorry, anonymous users cannot upload files')
            raise exceptions.PermissionDenied(msg)
        request.user.assert_can_upload_file()
        #todo: build proper form validation
        # only a fixed whitelist of prefixes is accepted, to stop callers
        # from choosing arbitrary storage names
        file_name_prefix = request.POST.get('file_name_prefix', '')
        if file_name_prefix not in ('', 'group_logo_'):
            raise exceptions.PermissionDenied('invalid upload file name prefix')
        #todo: check file type
        uploaded_file = request.FILES['file-upload']#take first file
        orig_file_name = uploaded_file.name
        #todo: extension checking should be replaced with mimetype checking
        #and this must be part of the form validation
        file_extension = os.path.splitext(orig_file_name)[1].lower()
        if not file_extension in settings.ASKBOT_ALLOWED_UPLOAD_FILE_TYPES:
            file_types = "', '".join(settings.ASKBOT_ALLOWED_UPLOAD_FILE_TYPES)
            msg = _("allowed file types are '%(file_types)s'") % \
                {'file_types': file_types}
            raise exceptions.PermissionDenied(msg)
        # generate new file name and storage object
        file_storage, new_file_name, file_url = store_file(
            uploaded_file, file_name_prefix
        )
        # check file size
        # byte
        # oversized files are deleted again before reporting the error
        size = file_storage.size(new_file_name)
        if size > settings.ASKBOT_MAX_UPLOAD_FILE_SIZE:
            file_storage.delete(new_file_name)
            msg = _("maximum upload file size is %(file_size)sK") % \
                {'file_size': settings.ASKBOT_MAX_UPLOAD_FILE_SIZE}
            raise exceptions.PermissionDenied(msg)
    except exceptions.PermissionDenied, e:
        # expected validation failures: report the message to the client
        error = unicode(e)
    except Exception, e:
        # anything else is a server-side problem: log it, show generic text
        logging.critical(unicode(e))
        error = _('Error uploading file. Please contact the site administrator. Thank you.')
    if error == '':
        result = 'Good'
    else:
        result = ''
        file_url = ''
    #data = simplejson.dumps({
    #    'result': result,
    #    'error': error,
    #    'file_url': file_url
    #})
    #return HttpResponse(data, mimetype = 'application/json')
    xml_template = "<result><msg><![CDATA[%s]]></msg><error><![CDATA[%s]]></error><file_url>%s</file_url><orig_file_name><![CDATA[%s]]></orig_file_name></result>"
    xml = xml_template % (result, error, file_url, orig_file_name)
    return HttpResponse(xml, content_type="application/xml")
def __import_se_data(dump_file):
    """non-view function that imports the SE data

    in the future may import other formats as well

    In this function stdout is temporarily
    redirected, so that the underlying importer management
    command could stream the output to the browser

    This is a generator: it yields HTML fragments that import_data()
    wraps into a streaming HttpResponse.

    todo: maybe need to add try/except clauses to restore
    the redirects in the exceptional situations
    """
    # redirect stdout into a temp file that we can tail below
    fake_stdout = tempfile.NamedTemporaryFile()
    real_stdout = sys.stdout
    sys.stdout = fake_stdout

    importer = stackexchange.ImporterThread(dump_file = dump_file.name)
    importer.start()

    #run a loop where we'll be reading output of the
    #importer tread and yielding it to the caller
    read_stdout = open(fake_stdout.name, 'r')
    file_pos = 0
    fd = read_stdout.fileno()

    yield '<html><body><style>* {font-family: sans;} p {font-size: 12px; line-height: 16px; margin: 0; padding: 0;}</style><h1>Importing your data. This may take a few minutes...</h1>'

    # busy-polls the file size to detect new importer output; one line is
    # forwarded to the browser per new write
    while importer.isAlive():
        c_size = os.fstat(fd).st_size
        if c_size > file_pos:
            line = read_stdout.readline()
            yield '<p>' + line + '</p>'
            file_pos = read_stdout.tell()

    # importer finished: release files and restore the real stdout
    fake_stdout.close()
    read_stdout.close()
    dump_file.close()
    sys.stdout = real_stdout

    yield '<p>Done. Please, <a href="%s">Visit Your Forum</a></p></body></html>' % reverse('index')
@csrf.csrf_protect
def import_data(request):
    """a view allowing the site administrator
    upload stackexchange data
    """
    # Admins may always use this view; everybody else only while the
    # forum is still completely empty (no questions posted yet).
    lacks_permission = request.user.is_anonymous() \
        or (not request.user.is_administrator())
    if lacks_permission and models.Post.objects.get_questions().exists():
        raise Http404

    if request.method == 'POST':
        #if not request.is_ajax():
        #    raise Http404

        form = forms.DumpUploadForm(request.POST, request.FILES)
        if form.is_valid():
            dump_file = form.cleaned_data['dump_file']
            dump_storage = tempfile.NamedTemporaryFile()

            # spool the upload into a named temp file the importer can reopen
            for chunk in dump_file.chunks():
                dump_storage.write(chunk)
            dump_storage.flush()

            # stream importer progress back to the browser
            return HttpResponse(__import_se_data(dump_storage))
            #yield HttpResponse(_('StackExchange import complete.'), content_type='text/plain')
            #dump_storage.close()
    else:
        form = forms.DumpUploadForm()

    # GET, or POST with an invalid form: show the upload page
    template_context = {
        'dump_upload_form': form,
        'need_configuration': (not stackexchange.is_ready()),
    }
    return render(request, 'import_data.html', template_context)
@fix_recaptcha_remote_ip
@csrf.csrf_protect
@decorators.check_authorization_to_post(ugettext_lazy('Please log in to make posts'))
@decorators.check_spam('text')
def ask(request):#view used to ask a new question
    """a view to ask a new question
    gives space for q title, body, tags and checkbox for to post as wiki

    user can start posting a question anonymously but then
    must login/register in order for the question go be shown
    """
    # read-only users and site-wide read-only mode cannot post
    if request.user.is_authenticated():
        if request.user.is_read_only():
            referer = request.META.get("HTTP_REFERER", reverse('questions'))
            request.user.message_set.create(message=_('Sorry, but you have only read access'))
            return HttpResponseRedirect(referer)

    if askbot_settings.READ_ONLY_MODE_ENABLED:
        return HttpResponseRedirect(reverse('index'))

    if request.method == 'POST':
        form = forms.AskForm(request.POST, user=request.user)
        if form.is_valid():
            timestamp = datetime.datetime.now()
            title = form.cleaned_data['title']
            wiki = form.cleaned_data['wiki']
            tagnames = form.cleaned_data['tags']
            text = form.cleaned_data['text']
            ask_anonymously = form.cleaned_data['ask_anonymously']
            post_privately = form.cleaned_data['post_privately']
            group_id = form.cleaned_data.get('group_id', None)
            language = form.cleaned_data.get('language', None)

            # pick the posting user: the authenticated author, a shared
            # anonymous account (if the site allows it), or nobody
            if request.user.is_authenticated():
                # posting discards any saved draft of this user
                drafts = models.DraftQuestion.objects.filter(author=request.user)
                drafts.delete()
                user = form.get_post_user(request.user)
            elif request.user.is_anonymous() and askbot_settings.ALLOW_ASK_UNREGISTERED:
                user = models.get_or_create_anonymous_user()
                ask_anonymously = True
            else:
                user = None

            if user:
                try:
                    question = user.post_question(
                        title=title,
                        body_text=text,
                        tags=tagnames,
                        wiki=wiki,
                        is_anonymous=ask_anonymously,
                        is_private=post_privately,
                        timestamp=timestamp,
                        group_id=group_id,
                        language=language,
                        ip_addr=request.META.get('REMOTE_ADDR')
                    )
                    signals.new_question_posted.send(None,
                        question=question,
                        user=user,
                        form_data=form.cleaned_data
                    )
                    return HttpResponseRedirect(question.get_absolute_url())
                except exceptions.PermissionDenied, e:
                    request.user.message_set.create(message = unicode(e))
                    return HttpResponseRedirect(reverse('index'))
            else:
                # no posting user: park the question on the session and send
                # the visitor to the login page; it is published after login
                request.session.flush()
                session_key=request.session.session_key
                models.AnonymousQuestion.objects.create(
                    session_key=session_key,
                    title=title,
                    tagnames=tagnames,
                    wiki=wiki,
                    is_anonymous=ask_anonymously,
                    text=text,
                    added_at=timestamp,
                    ip_addr=request.META.get('REMOTE_ADDR'),
                )
                return HttpResponseRedirect(url_utils.get_login_url())

    if request.method == 'GET':
        form = forms.AskForm(user=request.user)

    # pre-fill the form from the user's latest saved draft, if any
    draft_title = ''
    draft_text = ''
    draft_tagnames = ''
    if request.user.is_authenticated():
        drafts = models.DraftQuestion.objects.filter(author=request.user)
        if len(drafts) > 0:
            draft = drafts[0]
            draft_title = draft.title
            draft_text = draft.text
            draft_tagnames = draft.tagnames

    # request parameters override the draft values
    form.initial = {
        'ask_anonymously': request.REQUEST.get('ask_anonymously', False),
        'tags': request.REQUEST.get('tags', draft_tagnames),
        'text': request.REQUEST.get('text', draft_text),
        'title': request.REQUEST.get('title', draft_title),
        'post_privately': request.REQUEST.get('post_privately', False),
        'language': get_language(),
        'wiki': request.REQUEST.get('wiki', False),
    }
    if 'group_id' in request.REQUEST:
        try:
            group_id = int(request.GET.get('group_id', None))
            form.initial['group_id'] = group_id
        except Exception:
            # a non-integer group_id is silently ignored
            pass

    editor_is_folded = (askbot_settings.QUESTION_BODY_EDITOR_MODE=='folded' and \
                        askbot_settings.MIN_QUESTION_BODY_LENGTH==0 and \
                        form.initial['text'] == '')

    data = {
        'active_tab': 'ask',
        'page_class': 'ask-page',
        'form' : form,
        'editor_is_folded': editor_is_folded,
        'mandatory_tags': models.tag.get_mandatory_tags(),
        'email_validation_faq_url':reverse('faq') + '#validate',
        'category_tree_data': askbot_settings.CATEGORY_TREE,
        'tag_names': list()#need to keep context in sync with edit_question for tag editor
    }
    data.update(context.get_for_tag_editor())
    return render(request, 'ask.html', data)
@login_required
@csrf.csrf_protect
def retag_question(request, id):
    """retag question view

    Serves both plain form submissions (redirect responses) and Ajax
    requests (JSON with 'success' and either 'new_tags' or 'message').
    """
    question = get_object_or_404(models.Post, id=id)
    try:
        # raises exceptions.PermissionDenied, handled below
        request.user.assert_can_retag_question(question)
        if request.method == 'POST':
            form = forms.RetagQuestionForm(question, request.POST)
            if form.is_valid():
                if form.has_changed():
                    request.user.retag_question(question=question, tags=form.cleaned_data['tags'])
                if request.is_ajax():
                    response_data = {
                        'success': True,
                        'new_tags': question.thread.tagnames
                    }
                    # forward the latest user message (if any) to the client
                    if request.user.message_set.count() > 0:
                        #todo: here we will possibly junk messages
                        message = request.user.get_and_delete_messages()[-1]
                        response_data['message'] = message
                    data = simplejson.dumps(response_data)
                    return HttpResponse(data, content_type="application/json")
                else:
                    return HttpResponseRedirect(question.get_absolute_url())
            elif request.is_ajax():
                # invalid form over Ajax: report the tag errors as JSON
                response_data = {
                    'message': format_errors(form.errors['tags']),
                    'success': False
                }
                data = simplejson.dumps(response_data)
                return HttpResponse(data, content_type="application/json")
        else:
            form = forms.RetagQuestionForm(question)

        # GET, or non-Ajax POST with an invalid form: render the page
        data = {
            'active_tab': 'questions',
            'question': question,
            'form' : form,
        }
        return render(request, 'question_retag.html', data)
    except exceptions.PermissionDenied, e:
        if request.is_ajax():
            response_data = {
                'message': unicode(e),
                'success': False
            }
            data = simplejson.dumps(response_data)
            return HttpResponse(data, content_type="application/json")
        else:
            request.user.message_set.create(message = unicode(e))
            return HttpResponseRedirect(question.get_absolute_url())
@login_required
@csrf.csrf_protect
@decorators.check_spam('text')
@fix_recaptcha_remote_ip
def edit_question(request, id):
    """edit question view

    Handles three flows: a "select revision" POST (revert-style edit),
    a content-edit POST, and a GET that shows the edit form.
    """
    question = get_object_or_404(models.Post, id=id)

    if askbot_settings.READ_ONLY_MODE_ENABLED:
        return HttpResponseRedirect(question.get_absolute_url())

    # revision 0 appears to mark a working/initial revision; fall back to
    # the latest one when it is absent
    try:
        revision = question.revisions.get(revision=0)
    except models.PostRevision.DoesNotExist:
        revision = question.get_latest_revision()

    revision_form = None

    try:
        # raises exceptions.PermissionDenied, handled below
        request.user.assert_can_edit_question(question)
        if request.method == 'POST':
            if request.POST['select_revision'] == 'true':
                #revert-type edit - user selected previous revision
                revision_form = forms.RevisionForm(
                    question,
                    revision,
                    request.POST
                )
                if revision_form.is_valid():
                    # Replace with those from the selected revision
                    rev_id = revision_form.cleaned_data['revision']
                    revision = question.revisions.get(revision = rev_id)
                    form = forms.EditQuestionForm(
                        question=question,
                        user=request.user,
                        revision=revision
                    )
                else:
                    # NOTE(review): this branch passes user=question.user,
                    # unlike the other EditQuestionForm constructions which
                    # pass request.user — confirm this is intentional
                    form = forms.EditQuestionForm(
                        request.POST,
                        question=question,
                        user=question.user,
                        revision=revision
                    )
            else:#new content edit
                # Always check modifications against the latest revision
                form = forms.EditQuestionForm(
                    request.POST,
                    question=question,
                    revision=revision,
                    user=request.user,
                )
                revision_form = forms.RevisionForm(question, revision)
            if form.is_valid():
                if form.has_changed():
                    if form.can_edit_anonymously() and form.cleaned_data['reveal_identity']:
                        question.thread.remove_author_anonymity()
                        question.is_anonymous = False
                    is_wiki = form.cleaned_data.get('wiki', question.wiki)
                    post_privately = form.cleaned_data['post_privately']
                    suppress_email = form.cleaned_data['suppress_email']

                    user = form.get_post_user(request.user)

                    user.edit_question(
                        question=question,
                        title=form.cleaned_data['title'],
                        body_text=form.cleaned_data['text'],
                        revision_comment=form.cleaned_data['summary'],
                        tags=form.cleaned_data['tags'],
                        wiki=is_wiki,
                        edit_anonymously=form.cleaned_data['edit_anonymously'],
                        is_private=post_privately,
                        suppress_email=suppress_email,
                        ip_addr=request.META.get('REMOTE_ADDR')
                    )

                    if 'language' in form.cleaned_data:
                        question.thread.set_language_code(form.cleaned_data['language'])

                return HttpResponseRedirect(question.get_absolute_url())
        else:
            #request type was "GET"
            revision_form = forms.RevisionForm(question, revision)
            initial = {
                'language': question.thread.language_code,
                'post_privately': question.is_private(),
                'wiki': question.wiki
            }
            form = forms.EditQuestionForm(
                question=question,
                revision=revision,
                user=request.user,
                initial=initial
            )

        # GET, revert selection, or invalid POST: render the edit page
        data = {
            'page_class': 'edit-question-page',
            'active_tab': 'questions',
            'question': question,
            'revision': revision,
            'revision_form': revision_form,
            'mandatory_tags': models.tag.get_mandatory_tags(),
            'form' : form,
            'tag_names': question.thread.get_tag_names(),
            'category_tree_data': askbot_settings.CATEGORY_TREE
        }
        data.update(context.get_for_tag_editor())
        return render(request, 'question_edit.html', data)

    except exceptions.PermissionDenied, e:
        request.user.message_set.create(message = unicode(e))
        return HttpResponseRedirect(question.get_absolute_url())
@login_required
@csrf.csrf_protect
@decorators.check_spam('text')
@fix_recaptcha_remote_ip
def edit_answer(request, id):
    """Edit view for an answer post.

    GET renders the edit form pre-filled from the suggested or latest
    revision; POST either switches the form to another revision
    (a revert-type edit, when ``select_revision`` is 'true') or saves the
    submitted changes.  Redirects back to the answer in read-only mode
    and on permission errors.
    """
    answer = get_object_or_404(models.Post, id=id)
    if askbot_settings.READ_ONLY_MODE_ENABLED:
        return HttpResponseRedirect(answer.get_absolute_url())
    #revision number 0 appears to mark a suggested (not yet approved)
    #revision — TODO confirm; fall back to the latest revision otherwise
    try:
        revision = answer.revisions.get(revision=0)
    except models.PostRevision.DoesNotExist:
        revision = answer.get_latest_revision()
    #the edit form class may be overridden via the ASKBOT_EDIT_ANSWER_FORM
    #setting (dotted path loaded with load_module)
    class_path = getattr(settings, 'ASKBOT_EDIT_ANSWER_FORM', None)
    if class_path:
        edit_answer_form_class = load_module(class_path)
    else:
        edit_answer_form_class = forms.EditAnswerForm
    try:
        request.user.assert_can_edit_answer(answer)
        if request.method == "POST":
            if request.POST['select_revision'] == 'true':
                # user has changed revision number
                revision_form = forms.RevisionForm(
                    answer,
                    revision,
                    request.POST
                )
                if revision_form.is_valid():
                    # Replace with those from the selected revision
                    rev = revision_form.cleaned_data['revision']
                    revision = answer.revisions.get(revision = rev)
                    form = edit_answer_form_class(
                        answer, revision, user=request.user
                    )
                else:
                    #invalid revision selection: re-show with POST data bound
                    form = edit_answer_form_class(
                        answer,
                        revision,
                        request.POST,
                        user=request.user
                    )
            else:
                #regular content edit
                form = edit_answer_form_class(
                    answer, revision, request.POST, user=request.user
                )
                revision_form = forms.RevisionForm(answer, revision)
                if form.is_valid():
                    if form.has_changed():
                        user = form.get_post_user(request.user)
                        suppress_email = form.cleaned_data['suppress_email']
                        is_private = form.cleaned_data.get('post_privately', False)
                        user.edit_answer(
                            answer=answer,
                            body_text=form.cleaned_data['text'],
                            revision_comment=form.cleaned_data['summary'],
                            wiki=form.cleaned_data.get('wiki', answer.wiki),
                            is_private=is_private,
                            suppress_email=suppress_email,
                            ip_addr=request.META.get('REMOTE_ADDR')
                        )
                        signals.answer_edited.send(None,
                            answer=answer,
                            user=user,
                            form_data=form.cleaned_data
                        )
                    #redirect whether or not anything actually changed
                    return HttpResponseRedirect(answer.get_absolute_url())
        else:
            #request type was "GET": show the unbound edit form
            revision_form = forms.RevisionForm(answer, revision)
            form = edit_answer_form_class(answer, revision, user=request.user)
            if request.user.can_make_group_private_posts():
                form.initial['post_privately'] = answer.is_private()
        data = {
            'page_class': 'edit-answer-page',
            'active_tab': 'questions',
            'answer': answer,
            'revision': revision,
            'revision_form': revision_form,
            'form': form,
        }
        #allow deployments to inject extra template context
        extra_context = context.get_extra(
            'ASKBOT_EDIT_ANSWER_PAGE_EXTRA_CONTEXT',
            request,
            data
        )
        data.update(extra_context)
        return render(request, 'answer_edit.html', data)
    except exceptions.PermissionDenied, e:
        request.user.message_set.create(message = unicode(e))
        return HttpResponseRedirect(answer.get_absolute_url())
#todo: rename this function to post_new_answer
@decorators.check_authorization_to_post(ugettext_lazy('Please log in to make posts'))
@decorators.check_spam('text')
@fix_recaptcha_remote_ip
def answer(request, id, form_class=forms.AnswerForm):#process a new answer
    """view that posts new answer

    anonymous users post into anonymous storage
    and redirected to login page
    authenticated users post directly
    """
    question = get_object_or_404(models.Post, post_type='question', id=id)
    if askbot_settings.READ_ONLY_MODE_ENABLED:
        return HttpResponseRedirect(question.get_absolute_url())
    if request.method == "POST":
        #substitute a custom form class from settings only when the caller
        #did not pass one explicitly (keeps backward compatibility)
        if form_class == forms.AnswerForm:
            custom_class_path = getattr(settings, 'ASKBOT_NEW_ANSWER_FORM', None)
            if custom_class_path:
                form_class = load_module(custom_class_path)
            else:
                form_class = forms.AnswerForm
        form = form_class(request.POST, user=request.user)
        if form.is_valid():
            if request.user.is_authenticated():
                #the answer is being posted for real now, so drop any
                #draft the user had saved for this thread
                drafts = models.DraftAnswer.objects.filter(
                    author=request.user,
                    thread=question.thread
                )
                drafts.delete()
                user = form.get_post_user(request.user)
                try:
                    answer = form.save(
                        question,
                        user,
                        ip_addr=request.META.get('REMOTE_ADDR')
                    )
                    signals.new_answer_posted.send(None,
                        answer=answer,
                        user=user,
                        form_data=form.cleaned_data
                    )
                    return HttpResponseRedirect(answer.get_absolute_url())
                except askbot_exceptions.AnswerAlreadyGiven, e:
                    #redirect to the user's pre-existing answer instead
                    request.user.message_set.create(message = unicode(e))
                    answer = question.thread.get_answers_by_user(user)[0]
                    return HttpResponseRedirect(answer.get_absolute_url())
                except exceptions.PermissionDenied, e:
                    request.user.message_set.create(message = unicode(e))
            else:
                #anonymous poster: reset the session, stash the answer
                #under the fresh session key, then send to login
                request.session.flush()
                models.AnonymousAnswer.objects.create(
                    question=question,
                    wiki=form.cleaned_data['wiki'],
                    text=form.cleaned_data['text'],
                    session_key=request.session.session_key,
                    ip_addr=request.META.get('REMOTE_ADDR'),
                )
                return HttpResponseRedirect(url_utils.get_login_url())
    return HttpResponseRedirect(question.get_absolute_url())
def __generate_comments_json(obj, user, avatar_size):
    """Build the JSON comment payload for a post (not a view).

    Returns an HttpResponse whose body is a JSON list with one entry per
    comment of *obj*, including per-visitor edit/delete permission flags.
    """
    models.Post.objects.precache_comments(for_posts=[obj], visitor=user)

    tz_suffix = ' ' + template_filters.TIMEZONE_STR
    authenticated = bool(user) and user.is_authenticated()

    def _visitor_permissions(comment):
        #returns (is_deletable, is_editable) for the visiting user
        if not authenticated:
            return False, False
        try:
            user.assert_can_delete_comment(comment)
            #/posts/392845/comments/219852/delete
            #todo translate this url
            deletable = True
        except exceptions.PermissionDenied:
            deletable = False
        return deletable, template_filters.can_edit_comment(user, comment)

    payload = []
    for comment in obj._cached_comments:
        deletable, editable = _visitor_permissions(comment)
        owner = comment.author
        payload.append({
            'id': comment.id,
            'object_id': obj.id,
            'comment_added_at': str(comment.added_at.replace(microsecond=0)) + tz_suffix,
            'html': comment.html,
            'user_display_name': escape(owner.username),
            'user_profile_url': owner.get_profile_url(),
            'user_avatar_url': owner.get_avatar_url(avatar_size),
            'user_id': owner.id,
            'user_is_administrator': owner.is_administrator(),
            'user_is_moderator': owner.is_moderator(),
            'is_deletable': deletable,
            'is_editable': editable,
            'points': comment.points,
            'score': comment.points, #to support js
            'upvoted_by_user': getattr(comment, 'upvoted_by_user', False),
        })
    return HttpResponse(simplejson.dumps(payload), content_type="application/json")
@csrf.csrf_protect
@decorators.check_spam('comment')
def post_comments(request):#generic ajax handler to load comments to an object
"""todo: fixme: post_comments is ambigous:
means either get comments for post or
add a new comment to post
"""
# only support get post comments by ajax now
post_type = request.REQUEST.get('post_type', '')
if not request.is_ajax() or post_type not in ('question', 'answer'):
raise Http404 # TODO: Shouldn't be 404! More like 400, 403 or sth more specific
if post_type == 'question' \
and askbot_settings.QUESTION_COMMENTS_ENABLED == False:
raise Http404
elif post_type == 'answer' \
and askbot_settings.ANSWER_COMMENTS_ENABLED == False:
raise Http404
user = request.user
if request.method == 'POST':
form = forms.NewCommentForm(request.POST)
elif request.method == 'GET':
form = forms.GetCommentDataForPostForm(request.GET)
if form.is_valid() == False:
return HttpResponseBadRequest(
_('This content is forbidden'),
mimetype='application/json'
)
post_id = form.cleaned_data['post_id']
avatar_size = form.cleaned_data['avatar_size']
try:
post = models.Post.objects.get(id=post_id)
except models.Post.DoesNotExist:
return HttpResponseBadRequest(
_('Post not found'), mimetype='application/json'
)
if request.method == "GET":
response = __generate_comments_json(post, user, avatar_size)
elif request.method == "POST":
try:
if user.is_anonymous():
msg = _('Sorry, you appear to be logged out and '
'cannot post comments. Please '
'<a href="%(sign_in_url)s">sign in</a>.') % \
{'sign_in_url': url_utils.get_login_url()}
raise exceptions.PermissionDenied(msg)
if askbot_settings.READ_ONLY_MODE_ENABLED:
raise exceptions.PermissionDenied(askbot_settings.READ_ONLY_MESSAGE)
comment = user.post_comment(
parent_post=post,
body_text=form.cleaned_data['comment'],
ip_addr=request.META.get('REMOTE_ADDR')
)
signals.new_comment_posted.send(None,
comment=comment,
user=user,
form_data=form.cleaned_data
)
response = __generate_comments_json(post, user, avatar_size)
except exceptions.PermissionDenied, e:
response = HttpResponseForbidden(unicode(e), content_type="application/json")
return response
@csrf.csrf_protect
@decorators.ajax_only
#@decorators.check_spam('comment')
def edit_comment(request):
    """Ajax handler that saves an edit to an existing comment.

    Returns a dict describing the updated comment (presumably serialized
    by the ``ajax_only`` decorator — confirm), in the same shape as the
    items built by ``__generate_comments_json``.  Raises PermissionDenied
    for anonymous users, read-only mode, or an invalid form.
    """
    if request.user.is_anonymous():
        raise exceptions.PermissionDenied(_('Sorry, anonymous users cannot edit comments'))
    if askbot_settings.READ_ONLY_MODE_ENABLED:
        raise exceptions.PermissionDenied(askbot_settings.READ_ONLY_MESSAGE)

    form = forms.EditCommentForm(request.POST)
    if not form.is_valid():
        raise exceptions.PermissionDenied('This content is forbidden')

    comment_post = models.Post.objects.get(
        post_type='comment',
        id=form.cleaned_data['comment_id']
    )

    revision = request.user.edit_comment(
        comment_post=comment_post,
        body_text=form.cleaned_data['comment'],
        suppress_email=form.cleaned_data['suppress_email'],
        ip_addr=request.META.get('REMOTE_ADDR'),
    )

    #NOTE(review): permissions here are computed for the comment's author,
    #while __generate_comments_json computes them for the visiting user;
    #this looks suspicious but is preserved — confirm before changing
    is_deletable = template_filters.can_delete_comment(
        comment_post.author, comment_post)
    is_editable = template_filters.can_edit_comment(
        comment_post.author, comment_post)

    #bug fix: a stray second assignment (``tz = template_filters.TIMEZONE_STR``)
    #used to clobber the space-prefixed value, gluing the timezone name onto
    #the seconds; keep the space for consistency with __generate_comments_json
    tz = ' ' + template_filters.TIMEZONE_STR
    timestamp = str(comment_post.added_at.replace(microsecond=0)) + tz

    #need this because the post.text is due to the latest approved
    #revision, but we may need the suggested revision
    comment_post.text = revision.text
    comment_post.html = comment_post.parse_post_text()['html']

    return {
        'id' : comment_post.id,
        'object_id': comment_post.parent.id,
        'comment_added_at': timestamp,
        'html': comment_post.html,
        'user_display_name': escape(comment_post.author.username),
        'user_url': comment_post.author.get_profile_url(),
        'user_id': comment_post.author.id,
        'is_deletable': is_deletable,
        'is_editable': is_editable,
        'score': comment_post.points, #to support unchanged js
        'points': comment_post.points,
        'voted': comment_post.is_upvoted_by(request.user),
    }
@csrf.csrf_protect
def delete_comment(request):
    """ajax handler to delete comment

    On success returns the refreshed JSON comment list for the parent
    post; returns 400 for an invalid form and a JSON 403 for logged-out
    users, read-only mode, permission problems, or non-ajax requests.
    """
    try:
        if request.user.is_anonymous():
            msg = _('Sorry, you appear to be logged out and '
                    'cannot delete comments. Please '
                    '<a href="%(sign_in_url)s">sign in</a>.') % \
                    {'sign_in_url': url_utils.get_login_url()}
            raise exceptions.PermissionDenied(msg)
        if request.is_ajax():
            form = forms.ProcessCommentForm(request.POST)
            if form.is_valid() == False:
                return HttpResponseBadRequest()
            comment_id = form.cleaned_data['comment_id']
            comment = get_object_or_404(models.Post, post_type='comment', id=comment_id)
            #raises PermissionDenied (caught below) when deletion is not allowed
            request.user.assert_can_delete_comment(comment)
            if askbot_settings.READ_ONLY_MODE_ENABLED:
                raise exceptions.PermissionDenied(askbot_settings.READ_ONLY_MESSAGE)
            parent = comment.parent
            comment.delete()
            #attn: recalc denormalized field
            parent.comment_count = parent.comments.count()
            parent.save()
            parent.thread.reset_cached_data()
            avatar_size = form.cleaned_data['avatar_size']
            return __generate_comments_json(parent, request.user, avatar_size)
        #non-ajax requests get a generic refusal, handled as a 403 below
        raise exceptions.PermissionDenied(
            _('sorry, we seem to have some technical difficulties')
        )
    except exceptions.PermissionDenied, e:
        return HttpResponseForbidden(
            unicode(e),
            mimetype = 'application/json'
        )
@login_required
@decorators.post_only
@csrf.csrf_protect
def comment_to_answer(request):
    """Convert a comment into a stand-alone answer on its thread.

    Expects a POSTed ``comment_id``; redirects to the (converted) post.
    In read-only mode the conversion is silently skipped and the user is
    redirected back to the comment.
    """
    if request.user.is_anonymous():
        #defensive: @login_required should make this unreachable, but the
        #check is kept for parity with the sibling conversion view
        msg = _('Sorry, only logged in users can convert comments to answers. '
                'Please <a href="%(sign_in_url)s">sign in</a>.') % \
                {'sign_in_url': url_utils.get_login_url()}
        raise exceptions.PermissionDenied(msg)
    form = forms.ConvertCommentForm(request.POST)
    if form.is_valid() == False:
        raise Http404
    comment = get_object_or_404(
        models.Post,
        post_type='comment',
        id=form.cleaned_data['comment_id']
    )
    #bug fix: was ``READ_ONLY_MODE_ENABLED is False``, an identity test
    #that would wrongly allow the conversion for falsy non-bool values
    #(e.g. 0 or None); test truthiness instead
    if not askbot_settings.READ_ONLY_MODE_ENABLED:
        request.user.repost_comment_as_answer(comment)
    return HttpResponseRedirect(comment.get_absolute_url())
@decorators.post_only
@csrf.csrf_protect
#todo: change the urls config for this
def repost_answer_as_comment(request, destination=None):
    """Convert an answer into a comment under the question or under the
    previous answer, moving the answer's own comments along with it.

    The conversion is skipped (with a user-visible message) when the
    answer text exceeds MAX_COMMENT_LENGTH or no destination post exists;
    in read-only mode the view redirects immediately without converting.
    """
    assert(
        destination in (
            'comment_under_question',
            'comment_under_previous_answer'
        )
    )
    if request.user.is_anonymous():
        msg = _('Sorry, only logged in users can convert answers to comments. '
                'Please <a href="%(sign_in_url)s">sign in</a>.') % \
                {'sign_in_url': url_utils.get_login_url()}
        raise exceptions.PermissionDenied(msg)
    answer_id = request.POST.get('answer_id')
    if answer_id:
        try:
            answer_id = int(answer_id)
        except (ValueError, TypeError):
            raise Http404
        answer = get_object_or_404(models.Post,
                post_type = 'answer', id=answer_id)
        if askbot_settings.READ_ONLY_MODE_ENABLED:
            return HttpResponseRedirect(answer.get_absolute_url())
        request.user.assert_can_convert_post(post=answer)
        if destination == 'comment_under_question':
            destination_post = answer.thread._question_post()
        else:
            #comment_under_previous_answer
            destination_post = answer.get_previous_answer(user=request.user)
        #todo: implement for comment under other answer
        if destination_post is None:
            message = _('Error - could not find the destination post')
            request.user.message_set.create(message=message)
            return HttpResponseRedirect(answer.get_absolute_url())
        if len(answer.text) <= askbot_settings.MAX_COMMENT_LENGTH:
            #reparent the answer itself
            answer.post_type = 'comment'
            answer.parent = destination_post
            #the converted answer plus its own comments all become
            #comments of the destination post
            new_comment_count = answer.comments.count() + 1
            answer.comment_count = 0
            answer_comments = models.Post.objects.get_comments().filter(parent=answer)
            answer_comments.update(parent=destination_post)
            #why this and not just "save"?
            answer.parse_and_save(author=answer.author)
            answer.thread.update_answer_count()
            #attn: denormalized count maintained by hand
            answer.parent.comment_count += new_comment_count
            answer.parent.save()
            answer.thread.reset_cached_data()
        else:
            message = _(
                'Cannot convert, because text has more characters than '
                '%(max_chars)s - maximum allowed for comments'
            ) % {'max_chars': askbot_settings.MAX_COMMENT_LENGTH}
            request.user.message_set.create(message=message)
        return HttpResponseRedirect(answer.get_absolute_url())
    else:
        raise Http404
| gpl-3.0 |
GoogleChrome/big-rig | app/src/thirdparty/telemetry/internal/backends/mandoline/desktop_mandoline_finder.py | 9 | 5077 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds desktop mandoline browsers that can be controlled by telemetry."""
import os
import sys
from telemetry.core import exceptions
from telemetry.core import platform as platform_module
from telemetry.internal.backends.mandoline import desktop_mandoline_backend
from telemetry.internal.browser import browser
from telemetry.internal.browser import possible_browser
from telemetry.internal.platform import desktop_device
from telemetry.internal.util import path
class PossibleDesktopMandolineBrowser(possible_browser.PossibleBrowser):
  """A desktop mandoline browser that can be controlled."""

  def __init__(self, browser_type, finder_options, executable,
               browser_directory):
    # Desktop mandoline always runs on the host, so the target OS is the
    # host platform.
    target_os = sys.platform.lower()
    super(PossibleDesktopMandolineBrowser, self).__init__(
        browser_type, target_os, supports_tab_control=False)
    assert browser_type in FindAllBrowserTypes(finder_options), (
        'Please add %s to desktop_mandoline_finder.FindAllBrowserTypes' %
         browser_type)
    self._local_executable = executable
    self._browser_directory = browser_directory

  def __repr__(self):
    return 'PossibleDesktopMandolineBrowser(type=%s, executable=%s)' % (
        self.browser_type, self._local_executable)

  def _InitPlatformIfNeeded(self):
    # Lazily bind to the host platform; repeated calls are no-ops.
    if self._platform:
      return
    self._platform = platform_module.GetHostPlatform()
    # pylint: disable=W0212
    self._platform_backend = self._platform._platform_backend

  def Create(self, finder_options):
    """Instantiate a controllable Browser backed by this executable."""
    self._InitPlatformIfNeeded()
    mandoline_backend = desktop_mandoline_backend.DesktopMandolineBackend(
        self._platform_backend, finder_options.browser_options,
        self._local_executable, self._browser_directory)
    return browser.Browser(
        mandoline_backend, self._platform_backend, self._credentials_path)

  def SupportsOptions(self, finder_options):
    # Mandoline cannot load browser extensions.
    if len(finder_options.extensions_to_load) != 0:
      return False
    return True

  def UpdateExecutableIfNeeded(self):
    # Local builds are used as-is; nothing to download or refresh.
    pass

  def last_modification_time(self):
    """Return the executable's mtime, or -1 when it does not exist."""
    if os.path.exists(self._local_executable):
      return os.path.getmtime(self._local_executable)
    return -1
def SelectDefaultBrowser(possible_browsers):
  """Pick the most recently modified browser, or None when none exist."""
  newest = None
  newest_mtime = None
  for candidate in possible_browsers:
    mtime = candidate.last_modification_time()
    if newest is None or mtime > newest_mtime:
      newest = candidate
      newest_mtime = mtime
  return newest
def CanFindAvailableBrowsers():
  """Desktop mandoline browsers exist only on Windows and Linux hosts."""
  host_os = platform_module.GetHostPlatform().GetOSName()
  return host_os in ('win', 'linux')
def CanPossiblyHandlePath(target_path):
_, extension = os.path.splitext(target_path.lower())
if sys.platform.startswith('linux'):
return not extension
elif sys.platform.startswith('win'):
return extension == '.exe'
return False
def FindAllBrowserTypes(_):
  """List every browser-type string this finder can produce.

  bug fix: 'exact' was missing from this list, yet FindAllAvailableBrowsers
  creates a PossibleDesktopMandolineBrowser of type 'exact' for a browser
  given via --browser-executable, and that class asserts its type is listed
  here — so the 'exact' path used to die with an AssertionError.
  """
  return [
      'exact',
      'mandoline-debug',
      'mandoline-debug_x64',
      'mandoline-release',
      'mandoline-release_x64',]
def FindAllAvailableBrowsers(finder_options, device):
  """Finds all the desktop mandoline browsers available on this machine.

  Returns a (possibly empty) list of PossibleDesktopMandolineBrowser:
  the explicit --browser-executable (type 'exact') when given, plus any
  executables found in the standard chromium build output directories.
  Raises PathMissingError when the explicit executable does not exist,
  and a plain Exception on unsupported host platforms.
  """
  if not isinstance(device, desktop_device.DesktopDevice):
    return []

  browsers = []

  if not CanFindAvailableBrowsers():
    return []

  # Look for a browser in the standard chrome build locations.
  if finder_options.chrome_root:
    chrome_root = finder_options.chrome_root
  else:
    chrome_root = path.GetChromiumSrcDir()

  if sys.platform.startswith('linux'):
    mandoline_app_name = 'mandoline'
  elif sys.platform.startswith('win'):
    mandoline_app_name = 'mandoline.exe'
  else:
    raise Exception('Platform not recognized')

  # Add the explicit browser executable if given and we can handle it.
  if (finder_options.browser_executable and
      CanPossiblyHandlePath(finder_options.browser_executable)):
    normalized_executable = os.path.expanduser(
        finder_options.browser_executable)
    if path.IsExecutable(normalized_executable):
      browser_directory = os.path.dirname(finder_options.browser_executable)
      browsers.append(PossibleDesktopMandolineBrowser('exact', finder_options,
                                                      normalized_executable,
                                                      browser_directory))
    else:
      raise exceptions.PathMissingError(
          '%s specified by --browser-executable does not exist',
          normalized_executable)

  def AddIfFound(browser_type, build_dir, type_dir, app_name):
    # Registers one candidate browser if its executable exists.
    browser_directory = os.path.join(chrome_root, build_dir, type_dir)
    app = os.path.join(browser_directory, app_name)
    if path.IsExecutable(app):
      browsers.append(PossibleDesktopMandolineBrowser(
          browser_type, finder_options, app, browser_directory))
      return True
    return False

  # Add local builds.
  for build_dir, build_type in path.GetBuildDirectories():
    AddIfFound('mandoline-' + build_type.lower(), build_dir, build_type,
               mandoline_app_name)

  return browsers
| apache-2.0 |
Jusedawg/SickRage | lib/hachoir_parser/image/common.py | 95 | 1429 | from hachoir_core.field import FieldSet, UserVector, UInt8
class RGB(FieldSet):
    """24-bit red/green/blue color triple, one byte per channel."""

    # Well-known colors are shown by name instead of a hex code.
    color_name = {
        (0, 0, 0): "Black",
        (255, 0, 0): "Red",
        (0, 255, 0): "Green",
        (0, 0, 255): "Blue",
        (255, 255, 255): "White",
    }
    static_size = 24

    def createFields(self):
        """Parse the three one-byte channels in R, G, B order."""
        for channel in ("red", "green", "blue"):
            yield UInt8(self, channel, channel.capitalize())

    def createDescription(self):
        """Describe the color by name when known, else as #RRGGBB."""
        triple = (self["red"].value, self["green"].value, self["blue"].value)
        label = self.color_name.get(triple) or "#%02X%02X%02X" % triple
        return "RGB color: " + label
class RGBA(RGB):
    """32-bit color: an RGB triple followed by an alpha (opacity) byte."""

    static_size = 32

    def createFields(self):
        """Parse the four one-byte channels in R, G, B, A order."""
        for channel in ("red", "green", "blue", "alpha"):
            yield UInt8(self, channel, channel.capitalize())

    def createDescription(self):
        """Append the opacity percentage to the base RGB description."""
        base = RGB.createDescription(self)
        opacity = self["alpha"].value*100/255
        return "%s (opacity: %s%%)" % (base, opacity)
class PaletteRGB(UserVector):
    """Palette: a fixed-length vector of RGB colors."""
    # Each palette entry is parsed as one RGB triple.
    item_class = RGB
    item_name = "color"

    def createDescription(self):
        """Summary line, e.g. "Palette of 256 RGB colors"."""
        return "Palette of %u RGB colors" % len(self)
class PaletteRGBA(PaletteRGB):
    """Palette variant whose entries carry an alpha channel (RGBA)."""
    item_class = RGBA

    def createDescription(self):
        """Summary line, e.g. "Palette of 256 RGBA colors"."""
        return "Palette of %u RGBA colors" % len(self)
| gpl-3.0 |
hopeall/odoo | openerp/exceptions.py | 312 | 3157 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" OpenERP core exceptions.
This module defines a few exception types. Those types are understood by the
RPC layer. Any other exception type bubbling until the RPC layer will be
treated as a 'Server error'.
If you consider introducing new exceptions, check out the test_exceptions addon.
"""
# kept for backward compatibility
class except_orm(Exception):
    """Generic ORM-level exception carrying a (name, value) pair.

    Kept for backward compatibility; this type is understood by the
    RPC layer (see module docstring).
    """
    def __init__(self, name, value):
        super(except_orm, self).__init__(name, value)
        self.name = name
        self.value = value
class Warning(Exception):
    """Exception type delivered to clients by the RPC layer (see module
    docstring).

    NOTE(review): this intentionally shadows the builtin ``Warning``
    class — refer to it through the module namespace to avoid confusion.
    """
    pass
class RedirectWarning(Exception):
    """ Warning with a possibility to redirect the user instead of simply
    displaying the warning message.

    Should receive as parameters:
      :param int action_id: id of the action where to perform the redirection
      :param string button_text: text to put on the button that will trigger
          the redirection.
    """
class AccessDenied(Exception):
    """ Login/password error. No message, no traceback. """
    def __init__(self):
        # Fixed message: deliberately reveals nothing about which
        # credential failed.
        super(AccessDenied, self).__init__('Access denied.')
        # Blank exc_info-style triple so no traceback detail reaches
        # the client.
        self.traceback = ('', '', '')
class AccessError(except_orm):
    """ Access rights error. """
    def __init__(self, msg):
        # Tagged 'AccessError' so the RPC layer/clients can classify it.
        super(AccessError, self).__init__('AccessError', msg)
class MissingError(except_orm):
    """ Missing record(s). """
    def __init__(self, msg):
        # Tagged 'MissingError' so the RPC layer/clients can classify it.
        super(MissingError, self).__init__('MissingError', msg)
class ValidationError(except_orm):
    """Invalid-data error; sent to clients under the legacy
    'ValidateError' tag (note the spelling difference from the class
    name).
    """
    def __init__(self, msg):
        super(ValidationError, self).__init__('ValidateError', msg)
class DeferredException(Exception):
    """ Exception object holding a traceback for asynchronous reporting.

    Some RPC calls (database creation and report generation) happen with
    an initial request followed by multiple, polling requests. This class
    is used to store the possible exception occurring in the thread serving
    the first request, and is then sent to a polling request.

    ('Traceback' is misleading, this is really a exc_info() triple.)
    """
    def __init__(self, msg, tb):
        # msg: human-readable description; tb: an exc_info() triple
        self.message = msg
        self.traceback = tb
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kmshi/miroguide | channelguide/channels/migrations/0003_switch_user_ids.py | 1 | 13086 |
from south.db import db
from django.db import models
from channelguide.channels.models import *
class Migration:
no_dry_run = True
    def forwards(self, orm):
        """Repoint each Channel's user foreign keys from UserProfile ids
        to the corresponding auth User objects."""
        for channel in orm.Channel.objects.all():
            for field in ('owner', 'featured_by', 'moderator_shared_by',
                          'last_moderated_by'):
                # Read the raw FK column value, which still holds a
                # UserProfile primary key at this point of the migration.
                value = getattr(channel, '%s_id' % field)
                if value:
                    profile = orm['user_profile.UserProfile'].objects.get(
                        pk=value)
                    setattr(channel, field, profile.user)
                else:
                    setattr(channel, field, None)
            channel.save()
    def backwards(self, orm):
        """Intentionally a no-op: the forward data conversion is not
        reversed."""
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'user_profile.userprofile': {
'Meta': {'db_table': "'user'"},
'age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'blocked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'channel_owner_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
'email_updates': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'filter_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'fname': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'hashed_password': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'im_type': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'im_username': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'language': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5'}),
'lname': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'moderator_board_email': ('django.db.models.fields.CharField', [], {'default': "'S'", 'max_length': '1'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '1'}),
'show_explicit': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'shown_languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['labels.Language']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'status_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'to_field': "'username'", 'unique': 'True', 'db_column': "'username'"}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'channels.addedchannel': {
'Meta': {'unique_together': "[('channel', 'user')]", 'db_table': "'cg_channel_added'"},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'added_channels'", 'to': "orm['channels.Channel']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'added_channels'", 'to': "orm['auth.User']"})
},
'channels.channel': {
'Meta': {'db_table': "'cg_channel'"},
'adult': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'approved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['labels.Category']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'featured_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'featured_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'feed_etag': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'feed_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'geoip': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'hi_def': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'channels'", 'db_column': "'primary_language_id'", 'to': "orm['labels.Language']"}),
'last_moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_moderated_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'license': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}),
'moderator_shared_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'moderator_shared_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderator_shared_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'channels'", 'to': "orm['auth.User']"}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'publisher': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['labels.Tag']"}),
'thumbnail_extension': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '8', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'waiting_for_reply_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'was_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '255'})
},
'channels.item': {
'Meta': {'db_table': "'cg_channel_item'"},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['channels.Channel']"}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'thumbnail_extension': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '8', 'null': 'True'}),
'thumbnail_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255'})
},
'channels.lastapproved': {
'Meta': {'db_table': "'cg_channel_last_approved'"},
'timestamp': ('django.db.models.fields.DateTimeField', [], {'primary_key': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'labels.category': {
'Meta': {'db_table': "'cg_category'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'on_frontpage': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'labels.language': {
'Meta': {'db_table': "'cg_channel_language'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'labels.tag': {
'Meta': {'db_table': "'cg_tag'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['channels']
| agpl-3.0 |
pombredanne/PyGithub | github/__init__.py | 2 | 2945 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
"""
The primary class you will instanciate is :class:`github.MainClass.Github`.
From its ``get_``, ``create_`` methods, you will obtain instances of all Github objects
like :class:`github.NamedUser.NamedUser` or :class:`github.Repository.Repository`.
All classes inherit from :class:`github.GithubObject.GithubObject`.
"""
import logging
from MainClass import Github
from GithubException import GithubException, BadCredentialsException, UnknownObjectException, BadUserAgentException, RateLimitExceededException, BadAttributeException
from InputFileContent import InputFileContent
from InputGitAuthor import InputGitAuthor
from InputGitTreeElement import InputGitTreeElement
def enable_console_debug_logging():  # pragma no cover (Function useful only outside test environment)
    """
    Set up a minimal logging configuration for troubleshooting.

    Lowers the "github" logger to DEBUG and attaches a plain
    StreamHandler so every message is emitted on standard error.
    """
    github_logger = logging.getLogger("github")
    github_logger.setLevel(logging.DEBUG)
    console_handler = logging.StreamHandler()
    github_logger.addHandler(console_handler)
| gpl-3.0 |
QingChenmsft/azure-cli | src/command_modules/azure-cli-resource/azure/cli/command_modules/resource/_help.py | 2 | 20903 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.help_files import helps
# pylint: disable=line-too-long, too-many-lines
helps['managedapp'] = """
type: group
short-summary: Manage template solutions provided and maintained by Independent Software Vendors (ISVs).
"""
helps['managedapp definition'] = """
type: group
short-summary: Manage Azure Managed Applications.
"""
helps['managedapp create'] = """
type: command
short-summary: Create a managed application.
examples:
- name: Create a managed application of kind 'ServiceCatalog'. This requires a valid managed application definition ID.
text: |
az managedapp create -g MyResourceGroup -n MyManagedApp -l westcentralus --kind ServiceCatalog \\
-m "/subscriptions/{SubID}/resourceGroups/{ManagedRG}" \\
-d "/subscriptions/{SubID}/resourceGroups/{MyRG}/providers/Microsoft.Solutions/applianceDefinitions/{ManagedAppDef}"
- name: Create a managed application of kind 'MarketPlace'. This requires a valid plan, containing details about existing marketplace package like plan name, version, publisher and product.
text: |
az managedapp create -g MyResourceGroup -n MyManagedApp -l westcentralus --kind MarketPlace \\
-m "/subscriptions/{SubID}/resourceGroups/myManagedRG" \\
--plan-name ContosoAppliance --plan-version "1.0" --plan-product "contoso-appliance" --plan-publisher Contoso
"""
helps['managedapp definition create'] = """
type: command
short-summary: Create a managed application definition.
examples:
- name: Create a managed application defintion.
text: >
az managedapp definition create -g MyResourceGroup -n MyManagedAppDef -l eastus --display-name "MyManagedAppDef" \\
--description "My Managed App Def description" -a "myPrincipalId:myRoleId" --lock-level None \\
--package-file-uri "https://path/to/myPackage.zip"
"""
helps['managedapp definition delete'] = """
type: command
short-summary: Delete a managed application definition.
"""
helps['managedapp definition list'] = """
type: command
short-summary: List managed application definitions.
"""
helps['managedapp delete'] = """
type: command
short-summary: Delete a managed application.
"""
helps['managedapp list'] = """
type: command
short-summary: List managed applications.
"""
# Parameter descriptions must use the 'short-summary' key ('text' is only
# meaningful inside 'examples'), matching every other entry in this file
# (e.g. 'lock create'); otherwise the descriptions do not render in --help.
helps['lock'] = """
    type: group
    short-summary: Manage Azure locks.
    parameters:
        - name: --resource-type
          type: string
          short-summary: The name of the resource type. May have a provider namespace.
        - name: --resource-provider-namespace
          type: string
          short-summary: The name of the resource provider.
        - name: --parent-resource-path
          type: string
          short-summary: The path to the parent resource of the resource being locked.
        - name: --resource-name
          type: string
          short-summary: The name of the resource this lock applies to.
"""
helps['lock create'] = """
type: command
short-summary: Create a lock.
long-summary: 'Locks can exist at three different scopes: subscription, resource group and resource.'
parameters:
- name: --notes
type: string
short-summary: Notes about this lock.
examples:
- name: Create a read-only subscription level lock.
text: >
az lock create --name lockName --resource-group group --lock-type ReadOnly
"""
# Fixed 'type: commands' -> 'type: command': the singular form is the only
# recognized help-entry type (every other command entry in this file uses it).
helps['lock delete'] = """
    type: command
    short-summary: Delete a lock.
    examples:
        - name: Delete a resource-group-level lock
          text: >
            az lock delete --name lockName --resource-group group
"""
helps['lock list'] = """
type: command
short-summary: List lock information.
examples:
- name: List out the locks on a vnet resource. Includes locks in the associated group and subscription.
text: >
az lock list --resource-name myvnet --resource-type Microsoft.Network/virtualNetworks -g group
- name: List out all locks on the subscription level
text: >
az lock list
"""
helps['lock show'] = """
type: command
short-summary: Show the properties of a lock
examples:
- name: Show a subscription level lock
text: >
az lock show -n lockname
"""
helps['lock update'] = """
type: command
short-summary: Update a lock.
parameters:
- name: --notes
type: string
short-summary: Notes about this lock.
examples:
- name: Update a resource-group level lock with new notes and type
text: >
az lock update --name lockName --resource-group group --notes newNotesHere --lock-type CanNotDelete
"""
helps['policy'] = """
type: group
short-summary: Manage resource policies.
"""
helps['policy definition'] = """
type: group
short-summary: Manage resource policy definitions.
"""
helps['policy definition create'] = """
type: command
short-summary: Create a policy definition.
parameters:
- name: --rules
type: string
short-summary: Policy rules in JSON format, or a path to a file containing JSON rules.
examples:
- name: Create a read-only policy.
text: |
az policy definition create -n readOnlyStorage --rules \\
{ \\
"if": \\
{ \\
"source": "action", \\
"equals": "Microsoft.Storage/storageAccounts/write" \\
}, \\
"then": \\
{ \\
"effect": "deny" \\
} \\
}
- name: Create a policy parameter definition with the following example
text: |
{
"allowedLocations": {
"type": "array",
"metadata": {
"description": "The list of locations that can be specified
when deploying resources",
"strongType": "location",
"displayName": "Allowed locations"
}
}
}
"""
helps['policy definition delete'] = """
type: command
short-summary: Delete a policy definition.
"""
# Capitalized 'Get' for consistency with the sibling summaries
# ('Create a policy definition.', 'Delete a policy definition.', ...).
helps['policy definition show'] = """
    type: command
    short-summary: Get a policy definition.
"""
helps['policy definition update'] = """
type: command
short-summary: Update a policy definition.
"""
helps['policy definition list'] = """
type: command
short-summary: List policy definitions.
"""
helps['policy assignment'] = """
type: group
short-summary: Manage resource policy assignments.
"""
helps['policy assignment create'] = """
type: command
short-summary: Create a resource policy assignment.
examples:
- name: Provide rule parameter values with the following example
text: |
{
"allowedLocations": {
"value": [
"australiaeast",
"eastus",
"japaneast"
]
}
}
"""
helps['policy assignment delete'] = """
type: command
short-summary: Delete a resource policy assignment.
"""
helps['policy assignment show'] = """
type: command
short-summary: Show a resource policy assignment.
"""
helps['policy assignment list'] = """
type: command
short-summary: List resource policy assignments.
"""
helps['resource'] = """
type: group
short-summary: Manage Azure resources.
"""
helps['resource list'] = """
type: command
short-summary: List resources.
examples:
- name: List all resources in the West US region.
text: >
az resource list --location westus
- name: List all resources with the name 'resourceName'.
text: >
az resource list --name 'resourceName'
- name: List all resources with the tag 'test'.
text: >
az resource list --tag test
- name: List all resources with a tag that starts with 'test'.
text: >
az resource list --tag test*
- name: List all resources with the tag 'test' that have the value 'example'.
text: >
az resource list --tag test=example
"""
helps['resource show'] = """
type: command
short-summary: Get the details of a resource.
examples:
- name: Show a virtual machine resource named 'MyVm'.
text: >
az vm show -g MyResourceGroup -n MyVm --resource-type "Microsoft.Compute/virtualMachines"
- name: Show a web app using a resource identifier.
text: >
az resource show --id /subscriptions/{SubID}/resourceGroups/{MyRG}/providers/Microsoft.Web/sites/{MyWebapp}
- name: Show a subnet in the 'Microsoft.Network' namespace which belongs to the virtual network 'MyVnet'.
text: >
az resource show -g MyResourceGroup -n MySubnet --namespace Microsoft.Network --parent virtualnetworks/MyVnet --resource-type subnets
- name: Show a subnet using a resource identifier.
text: >
az resource show --id /subscriptions/{SubID}/resourceGroups/{MyRG}/providers/Microsoft.Network/virtualNetworks/{MyVnet}/subnets/{MySubnet}
- name: Show an application gateway path rule.
text: >
az resource show -g MyResourceGroup --namespace Microsoft.Network --parent applicationGateways/ag1/urlPathMaps/map1 --resource-type pathRules -n rule1
"""
helps['resource delete'] = """
type: command
short-summary: Delete a resource.
examples:
- name: Delete a virtual machine named 'MyVm'.
text: >
az vm delete -g MyResourceGroup -n MyVm --resource-type "Microsoft.Compute/virtualMachines"
- name: Delete a web app using a resource identifier.
text: >
az resource delete --id /subscriptions/{SubID}/resourceGroups/{MyRG}/providers/Microsoft.Web/sites/{MyWebApp}
- name: Delete a subnet using a resource identifier.
text: >
az resource delete --id /subscriptions/{SubID}/resourceGroups/{MyRG}/providers/Microsoft.Network/virtualNetworks/{MyVNET}/subnets/{MySubnet}
"""
helps['resource tag'] = """
type: command
short-summary: Tag a resource.
examples:
- name: Tag the virtual machine 'MyVm' with the key 'vmlist' and value 'vm1'.
text: >
az resource tag --tags vmlist=vm1 -g MyResourceGroup -n MyVm --resource-type "Microsoft.Compute/virtualMachines"
- name: Tag a web app with the key 'vmlist' and value 'vm1', using a resource identifier.
text: >
az resource tag --tags vmlist=vm1 --id /subscriptions/{SubID}/resourceGroups/{MyRG}/providers/Microsoft.Web/sites/{MyWebApp}
"""
helps['resource create'] = """
type: command
short-summary: create a resource.
examples:
- name: Create an API app by providing a full JSON configuration.
text: |
az resource create -g myRG -n myApiApp --resource-type Microsoft.web/sites --is-full-object --properties \\
'{\\
"kind": "api",\\
"location": "West US",\\
"properties": {\\
"serverFarmId": "/subscriptions/{SubID}/resourcegroups/{MyRG}/providers/Microsoft.Web/serverfarms/{MyServicePlan}"\\
}\\
}'
- name: Create a resource by loading JSON configuration from a file.
text: >
az resource create -g myRG -n myApiApp --resource-type Microsoft.web/sites --is-full-object --properties @jsonConfigFile
- name: Create a web app with the minimum required configuration information.
text: |
az resource create -g myRG -n myWeb --resource-type Microsoft.web/sites --properties \\
{ \\
"serverFarmId":"/subscriptions/{SubID}/resourcegroups/{MyRG}/providers/Microsoft.Web/serverfarms/{MyServicePlan}" \\
}
"""
helps['resource update'] = """
type: command
short-summary: Update a resource.
"""
helps['feature'] = """
type: group
short-summary: Manage resource provider features.
"""
helps['group'] = """
type: group
short-summary: Manage resource groups and template deployments.
"""
helps['group exists'] = """
type: command
short-summary: Check if a resource group exists.
examples:
- name: Check if 'MyResourceGroup' exists.
text: >
az group exists -n MyResourceGroup
"""
helps['group create'] = """
type: command
short-summary: Create a new resource group.
examples:
- name: Create a new resource group in the West US region.
text: >
az group create -l westus -n MyResourceGroup
"""
helps['group delete'] = """
type: command
short-summary: Delete a resource group.
examples:
- name: Delete a resource group.
text: >
az group delete -n MyResourceGroup
"""
helps['group list'] = """
type: command
short-summary: List resource groups.
examples:
- name: List all resource groups located in the West US region.
text: >
az group list --query "[?location=='westus']"
"""
helps['group update'] = """
type: command
short-summary: Update a resource group.
"""
helps['group wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the resource group is met.
"""
helps['group deployment'] = """
type: group
short-summary: Manage Azure Resource Manager deployments.
"""
helps['group deployment create'] = """
type: command
short-summary: Start a deployment.
parameters:
- name: --parameters
short-summary: Supply deployment parameter values.
long-summary: >
Parameters may be supplied from a file using the `@{path}` syntax, a JSON string, or as <KEY=VALUE> pairs. Parameters are evaluated in order, so when a value is assigned twice, the latter value will be used.
It is recommended that you supply your parameters file first, and then override selectively using KEY=VALUE syntax.
examples:
- name: Create a deployment from a remote template file, using parameters from a local JSON file.
text: >
az group deployment create -g MyResourceGroup --template-uri https://myresource/azuredeploy.json --parameters @myparameters.json
- name: Create a deployment from a local template file, using parameters from a JSON string.
text: |
az group deployment create -g MyResourceGroup --template-file azuredeploy.json --parameters \\
'{ \\
"location": {\\
"value": "westus" \\
} \\
}'
- name: Create a deployment from a local template, using a parameter file and selectively overriding key/value pairs.
text: >
az group deployment create -g MyResourceGroup --template-file azuredeploy.json --parameters @params.json --parameters MyValue=This MyArray=@array.json
"""
helps['group deployment export'] = """
type: command
short-summary: Export the template used for a deployment.
"""
helps['group deployment validate'] = """
type: command
short-summary: Validate whether a template is syntactically correct.
parameters:
- name: --parameters
short-summary: Supply deployment parameter values.
long-summary: >
Parameters may be supplied from a file using the `@{path}` syntax, a JSON string, or as <KEY=VALUE> pairs. Parameters are evaluated in order, so when a value is assigned twice, the latter value will be used.
It is recommended that you supply your parameters file first, and then override selectively using KEY=VALUE syntax.
"""
helps['group deployment wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a deployment condition is met.
"""
helps['group deployment operation'] = """
type: group
short-summary: Manage deployment operations.
"""
helps['provider'] = """
type: group
short-summary: Manage resource providers.
"""
helps['provider register'] = """
type: command
short-summary: Register a provider.
"""
helps['provider unregister'] = """
type: command
short-summary: Unregister a provider.
"""
helps['provider operation'] = """
type: group
short-summary: Get provider operations metadatas.
"""
helps['provider operation show'] = """
type: command
short-summary: Get an individual provider's operations.
"""
helps['provider operation list'] = """
type: command
short-summary: Get operations from all providers.
"""
helps['tag'] = """
type: group
short-summary: Manage resource tags.
"""
helps['resource link'] = """
type: group
short-summary: Manage links between resources.
long-summary: >
Linking is a feature of the Resource Manager. It enables declaring relationships between resources even if they do not reside in the same resource group.
Linking has no impact on resource usage, no impact on billing, and no impact on role-based access. It allows for managing multiple resources across groups
as a single unit.
"""
helps['resource link create'] = """
type: command
short-summary: Create a new link between resources.
long-summary: A link-id is of the form /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/{provider-namespace}/{resource-type}/{resource-name}/Microsoft.Resources/links/{link-name}
examples:
- name: Create a link from <link-id> to <resource-id> with notes "some notes to explain this link"
text: >
az resource link create --link-id <link-id> --target-id <resource-id> --notes "some notes to explain this link"
"""
helps['resource link update'] = """
type: command
short-summary: Update link between resources.
long-summary: A link-id is of the form /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/{provider-namespace}/{resource-type}/{resource-name}/Microsoft.Resources/links/{link-name}
examples:
- name: Update the notes for <link-id> notes "some notes to explain this link"
text: >
az resource link update --link-id <link-id> --notes "some notes to explain this link"
"""
helps['resource link delete'] = """
type: command
short-summary: Delete a link between resources.
long-summary: A link-id is of the form /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/{provider-namespace}/{resource-type}/{resource-name}/Microsoft.Resources/links/{link-name}
examples:
- name: Delete link <link-id>
text: >
az resource link delete --link-id <link-id>
"""
helps['resource link list'] = """
type: command
short-summary: List resource links.
examples:
- name: List links, filtering with <filter-string>
text: >
az resource link list --filter <filter-string>
- name: List all links at /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myGroup
text: >
az resource link list --scope /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myGroup
"""
helps['resource link show'] = """
type: command
short-summary: Get details for a resource link.
long-summary: A link-id is of the form /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/{provider-namespace}/{resource-type}/{resource-name}/Microsoft.Resources/links/{link-name}
examples:
- name: Show the <link-id> resource link.
text: >
az resource link show --link-id <link-id>
"""
| mit |
Aloomaio/graphite-web | webapp/graphite/composer/views.py | 58 | 3906 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import os
from smtplib import SMTP
from socket import gethostname
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEImage import MIMEImage
from httplib import HTTPConnection
from urlparse import urlsplit
from time import ctime, strftime
from traceback import format_exc
from graphite.util import getProfile
from graphite.logger import log
from graphite.account.models import MyGraph
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
def composer(request):
    """Render the graph composer page.

    Builds the template context from the request's query string, the
    current user's profile, and settings-derived feature flags, then
    renders composer.html with it.
    """
    user_profile = getProfile(request)

    template_vars = {}
    template_vars['queryString'] = request.GET.urlencode().replace('+', '%20')
    template_vars['showTarget'] = request.GET.get('showTarget', '')
    template_vars['user'] = request.user
    template_vars['profile'] = user_profile
    # Hide the "My Graphs" panel for the shared default account.
    template_vars['showMyGraphs'] = int(user_profile.user.username != 'default')
    # Search is only offered when the index file is readable.
    template_vars['searchEnabled'] = int(os.access(settings.INDEX_FILE, os.R_OK))
    template_vars['debug'] = settings.DEBUG
    template_vars['jsdebug'] = settings.DEBUG

    return render_to_response("composer.html", template_vars)
def mygraph(request):
    """Save or delete one of the logged-in user's saved graphs.

    Expects GET parameters:
      action    -- either 'save' or 'delete'
      graphName -- name of the graph to operate on
      url       -- (save only) the graph URL to store

    Returns a plain-text HttpResponse describing the outcome.
    """
    profile = getProfile(request, allowDefault=False)
    if not profile:
        return HttpResponse( "You are not logged in!" )

    action = request.GET['action']
    graphName = request.GET['graphName']
    if not graphName:
        return HttpResponse("You must type in a graph name.")

    if action == 'save':
        url = request.GET['url']
        try:
            # Update in place if the user already saved a graph under
            # this name.
            existingGraph = profile.mygraph_set.get(name=graphName)
            existingGraph.url = url
            existingGraph.save()
        except ObjectDoesNotExist:
            try:
                newGraph = MyGraph(profile=profile, name=graphName, url=url)
                newGraph.save()
            except Exception:
                # Narrowed from a bare 'except:' (which also swallowed
                # SystemExit/KeyboardInterrupt) while keeping the original
                # log-and-report best-effort behavior.
                log.exception("Failed to create new MyGraph in /composer/mygraph/, graphName=%s" % graphName)
                return HttpResponse("Failed to save graph %s" % graphName)
        return HttpResponse("SAVED")
    elif action == 'delete':
        try:
            existingGraph = profile.mygraph_set.get(name=graphName)
            existingGraph.delete()
        except ObjectDoesNotExist:
            return HttpResponse("No such graph '%s'" % graphName)
        return HttpResponse("DELETED")
    else:
        return HttpResponse("Invalid operation '%s'" % action)
def send_email(request):
    """Fetch a rendered graphite image and email it to a list of recipients.

    GET parameters:
      to  -- comma-separated list of recipient addresses
      url -- graphite render URL of the image to send

    Returns "OK" on success; on failure the traceback text is returned in
    the response body (preserving the original debugging-friendly contract).
    """
    try:
        recipients = request.GET['to'].split(',')
        url = request.GET['url']
        proto, server, path, query, frag = urlsplit(url)
        if query:
            path += '?' + query
        conn = HTTPConnection(server)
        conn.request('GET', path)
        resp = conn.getresponse()
        assert resp.status == 200, "Failed HTTP response %s %s" % (resp.status, resp.reason)
        rawData = resp.read()
        conn.close()
        message = MIMEMultipart()
        message['Subject'] = "Graphite Image"
        message['To'] = ', '.join(recipients)
        message['From'] = 'composer@%s' % gethostname()
        text = MIMEText( "Image generated by the following graphite URL at %s\r\n\r\n%s" % (ctime(),url) )
        image = MIMEImage( rawData )
        image.add_header('Content-Disposition', 'attachment', filename="composer_" + strftime("%b%d_%I%M%p.png"))
        message.attach(text)
        message.attach(image)
        s = SMTP(settings.SMTP_SERVER)
        s.sendmail('composer@%s' % gethostname(), recipients, message.as_string())
        s.quit()
        return HttpResponse( "OK" )
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # propagate instead of being rendered into the response body.
        return HttpResponse( format_exc() )
| apache-2.0 |
germanovm/vdsm | vdsm/network/utils.py | 2 | 1110 | # Copyright 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
def remove_custom_bond_option(options):
    """Return *options* with any 'custom=...' entry stripped out.

    Bond options are whitespace-separated KEY=VALUE tokens; every token
    whose key is 'custom' is dropped and the survivors are re-joined
    with single spaces.

    >>> remove_custom_bond_option('custom=foo=bar mode=1')
    'mode=1'
    """
    kept = [opt for opt in options.split() if not opt.startswith('custom=')]
    return ' '.join(kept)
| gpl-2.0 |
skdaccess/skdaccess | skdaccess/geo/srtm/cache/data_fetcher.py | 2 | 10677 | # The MIT License (MIT)
# Copyright (c) 2016 Massachusetts Institute of Technology
#
# Authors: Cody Rude, Guillaume Rongier
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherCache, ImageWrapper
from skdaccess.utilities.support import convertToStr
from skdaccess.utilities.image_util import AffineGlobalCoords, convertBinCentersToEdges
# 3rd party imports
import pandas as pd
import numpy as np
import gdal
from pkg_resources import resource_filename
# Standard library imports
from collections import OrderedDict
from calendar import monthrange
from zipfile import ZipFile
import os
class DataFetcher(DataFetcherCache):
''' DataFetcher for retrieving data from the Shuttle Radar Topography Mission '''
def __init__(self, lat_tile_start, lat_tile_end, lon_tile_start, lon_tile_end,
             username, password, arcsecond_sampling = 1, mask_water = True,
             store_geolocation_grids=False):
    '''
    Initialize Data Fetcher

    @param lat_tile_start: Latitude of the southwest corner of the starting tile
    @param lat_tile_end: Latitude of the southwest corner of the last tile
    @param lon_tile_start: Longitude of the southwest corner of the starting tile
    @param lon_tile_end: Longitude of the southwest corner of the last tile
    @param username: NASA Earth Data username
    @param password: NASA Earth Data Password
    @param arcsecond_sampling: Sample spacing of the SRTM data, either 1 arc-
                               second or 3 arc-seconds
    @param mask_water: True if the water bodies should be masked, false otherwise
    @param store_geolocation_grids: Store grids of latitude and longitude in the metadata
    '''
    # Only the two published SRTM products are supported:
    # SRTMGL1 (1 arc-second) and SRTMGL3 (3 arc-seconds).
    assert arcsecond_sampling == 1 or arcsecond_sampling == 3, "Sampling should be 1 or 3 arc-seconds"

    # Tile-range bounds are inclusive; output() iterates
    # lat_tile_start..lat_tile_end and lon_tile_start..lon_tile_end.
    self.lat_tile_start = lat_tile_start
    self.lat_tile_end = lat_tile_end
    self.lon_tile_start = lon_tile_start
    self.lon_tile_end = lon_tile_end
    # NASA Earth Data credentials used later when caching tiles.
    self.username = username
    self.password = password
    self.arcsecond_sampling = arcsecond_sampling
    self.mask_water = mask_water
    self.store_geolocation_grids = store_geolocation_grids

    # WKT description of the WGS 84 geographic coordinate system, used as
    # the projection metadata for tiles that have no data file available.
    self._missing_data_projection = '\n'.join([
        'GEOGCS["WGS 84",',
        '    DATUM["WGS_1984",',
        '        SPHEROID["WGS 84",6378137,298.257223563,',
        '            AUTHORITY["EPSG","7030"]],',
        '        AUTHORITY["EPSG","6326"]],',
        '    PRIMEM["Greenwich",0,',
        '        AUTHORITY["EPSG","8901"]],',
        '    UNIT["degree",0.0174532925199433,',
        '        AUTHORITY["EPSG","9122"]],',
        '    AUTHORITY["EPSG","4326"]]'
    ])

    super(DataFetcher, self).__init__()
    def output(self):
        '''
        Generate SRTM data wrapper

        Enumerates the 1-degree SRTM tiles covering the configured
        latitude/longitude tile range, downloads (via the data cache) the
        tiles that exist in the catalog, optionally masks water samples,
        and returns everything keyed by tile label (e.g. "N34W119").

        @return SRTM Image Wrapper
        '''
        # All integer tile corners in the requested (inclusive) range.
        lat_tile_array = np.arange(self.lat_tile_start, self.lat_tile_end+1)
        lon_tile_array = np.arange(self.lon_tile_start, self.lon_tile_end+1)
        lat_grid,lon_grid = np.meshgrid(lat_tile_array, lon_tile_array)
        lat_grid = lat_grid.ravel()
        lon_grid = lon_grid.ravel()
        # Default to the 1-arcsecond product (SRTMGL1); switch to the
        # 3-arcsecond product when requested.
        filename_root = '.SRTMGL1.'
        base_url = 'https://e4ftl01.cr.usgs.gov/MEASURES/'
        folder_root = 'SRTMGL1.003/2000.02.11/'
        if self.arcsecond_sampling == 3:
            filename_root = '.SRTMGL3.'
            folder_root = 'SRTMGL3.003/2000.02.11/'
        base_url += folder_root
        # Build the expected archive name for every tile; when water masking
        # is on, the matching '.num' mask archive is requested immediately
        # after each '.hgt' archive, so the list interleaves hgt/num rows.
        filename_list = []
        for lat, lon in zip(lat_grid, lon_grid):
            if lat < 0:
                lat_label = 'S'
                lat = np.abs(lat)
            else:
                lat_label = 'N'
            if lon < 0:
                lon_label = 'W'
                lon = np.abs(lon)
            else:
                lon_label = 'E'
            filename_list.append(lat_label + convertToStr(lat, 2) + lon_label + convertToStr(lon, 3) + filename_root + 'hgt.zip')
            if self.mask_water == True:
                filename_list.append(lat_label + convertToStr(lat, 2) + lon_label + convertToStr(lon, 3) + filename_root + 'num.zip')
        # Read in list of available data
        srtm_list_filename = 'srtm_gl1.txt'
        if self.arcsecond_sampling == 3:
            srtm_list_filename = 'srtm_gl3.txt'
        srtm_support_filename = resource_filename('skdaccess', os.path.join('support',srtm_list_filename))
        available_file_list = open(srtm_support_filename).readlines()
        available_file_list = [filename.strip() for filename in available_file_list]
        # Mark which requested archives actually exist in the catalog
        # (ocean-only tiles, for example, are absent).
        requested_files = pd.DataFrame({'Filename' : filename_list})
        requested_files['Valid'] = [ '.'.join(filename.split('.')[0:-2]) in available_file_list for filename in filename_list ]
        # Download (or pull from the cache) only the archives that exist.
        valid_filename_list = requested_files.loc[ requested_files['Valid']==True, 'Filename'].tolist()
        url_list = [base_url + filename for filename in valid_filename_list]
        downloaded_file_list = self.cacheData('srtm', url_list, self.username, self.password,
                                              'https://urs.earthdata.nasa.gov')
        requested_files.loc[ requested_files['Valid']==True, 'Full Path'] = downloaded_file_list
        def getCoordinates(filename):
            '''
            Determine the longitude and latitude of the lowerleft corner of the input filename

            @param in_filename: Input SRTM filename
            @return Latitude of southwest corner, Longitude of southwest corner
            '''
            lat_start = int(filename[1:3])
            if filename[0] == 'S':
                lat_start *= -1
            lon_start = int(filename[4:7])
            if filename[3] == 'W':
                lon_start *= -1
            return lat_start, lon_start
        data_dict = OrderedDict()
        metadata_dict = OrderedDict()
        # SRTMGL1 tiles are 3601x3601 samples; SRTMGL3 tiles are 1201x1201.
        array_shape = (3601,3601)
        if self.arcsecond_sampling == 3:
            array_shape = (1201,1201)
        # With water masking on, requested_files interleaves hgt/num rows, so
        # iterate every other row; masked samples become NaN instead of 0.
        file_slice = slice(None)
        water_value = 0
        if self.mask_water == True:
            file_slice = slice(0, -1, 2)
            water_value = np.nan
        for i in requested_files.index[file_slice]:
            hgt_full_path = requested_files.at[i, 'Full Path']
            hgt_filename = requested_files.at[i, 'Filename']
            # First 7 characters form the tile label, e.g. "N34W119".
            label = hgt_filename[:7]
            lat_start, lon_start = getCoordinates(hgt_filename)
            metadata_dict[label] = OrderedDict()
            # Half-pixel-padded geographic extent of this 1-degree tile.
            x_res = 1.0 / (array_shape[0]-1)
            y_res = 1.0 / (array_shape[1]-1)
            extents = [
                lon_start - x_res / 2,
                lon_start + 1 + x_res / 2,
                lat_start - y_res / 2,
                lat_start + 1 + y_res / 2
            ]
            if requested_files.at[i, 'Valid']:
                masked_dem_data = np.ones(array_shape)
                if self.mask_water == True and requested_files.at[i + 1, 'Valid']:
                    num_full_path = requested_files.at[i + 1, 'Full Path']
                    # NOTE(review): named num_filename but reads 'Full Path' —
                    # 'Filename' looks intended; the value is never used, so
                    # behavior is unaffected. Confirm and clean up.
                    num_filename = requested_files.at[i + 1, 'Full Path']
                    zipped_num_data = ZipFile(num_full_path)
                    zipped_num_full_path = zipped_num_data.infolist()[0].filename
                    # Mask codes 1 and 2 in the '.num' file mark water samples.
                    num_data = np.frombuffer(zipped_num_data.open(zipped_num_full_path).read(),
                                             np.dtype('uint8')).reshape(array_shape)
                    masked_dem_data[(num_data == 1) | (num_data == 2)] = water_value
                    # NOTE(review): rebinding the loop variable does not skip
                    # iterations of a pandas index loop; '.num' rows are
                    # already skipped via file_slice, so this looks like dead
                    # code — confirm.
                    i += 1
                # NOTE(review): zipped_hgt_data is never used afterwards —
                # gdal.Open reads the archive directly. Confirm it can go.
                zipped_hgt_data = ZipFile(hgt_full_path)
                dem_dataset = gdal.Open(hgt_full_path, gdal.GA_ReadOnly)
                dem_data = dem_dataset.ReadAsArray()
                # Multiplying applies the water mask (ones elsewhere).
                masked_dem_data *= dem_data
                metadata_dict[label]['WKT'] = dem_dataset.GetProjection()
                metadata_dict[label]['GeoTransform'] = dem_dataset.GetGeoTransform()
            else:
                # Tile absent from the catalog: synthesize an all-water tile
                # with a GDAL-style geotransform built from the extents.
                geo_transform = []
                geo_transform.append(extents[0])
                geo_transform.append(x_res)
                geo_transform.append(0)
                geo_transform.append(extents[-1])
                geo_transform.append(0)
                geo_transform.append(-y_res)
                metadata_dict[label]['WKT'] = self._missing_data_projection
                metadata_dict[label]['GeoTransform'] = geo_transform
                masked_dem_data = np.full(shape=array_shape, fill_value=water_value)
                # NOTE(review): dead increment — see note above.
                i += 1
            data_dict[label] = masked_dem_data
            metadata_dict[label]['Geolocation'] = AffineGlobalCoords(metadata_dict[label]['GeoTransform'], center_pixels=True)
            metadata_dict[label]['extents'] = extents
            if self.store_geolocation_grids:
                # Optional per-sample coordinate grids (north-to-south rows).
                lat_coords, lon_coords = np.meshgrid(np.linspace(lat_start+1, lat_start, array_shape[0]),
                                                     np.linspace(lon_start, lon_start+1, array_shape[1]),
                                                     indexing = 'ij')
                metadata_dict[label]['Latitude'] = lat_coords
                metadata_dict[label]['Longitude'] = lon_coords
        return ImageWrapper(obj_wrap = data_dict, meta_data = metadata_dict)
| mit |
janusnic/Djrill | djrill/tests/test_mandrill_subaccounts.py | 3 | 2389 | from django.core import mail
from .mock_backend import DjrillBackendMockAPITestCase
from .utils import override_settings
class DjrillMandrillSubaccountTests(DjrillBackendMockAPITestCase):
    """Test Djrill backend support for Mandrill subaccounts"""

    def _send_and_get_api_data(self):
        """Send a simple message and return the payload posted to Mandrill.

        Runs the assertions shared by the basic and subaccount tests
        (previously duplicated verbatim in both test methods).
        """
        mail.send_mail('Subject here', 'Here is the message.',
            'from@example.com', ['to@example.com'], fail_silently=False)
        self.assert_mandrill_called("/messages/send.json")
        data = self.get_api_call_data()
        self.assertEqual(data['message']['subject'], "Subject here")
        self.assertEqual(data['message']['text'], "Here is the message.")
        self.assertFalse('from_name' in data['message'])
        self.assertEqual(data['message']['from_email'], "from@example.com")
        self.assertEqual(len(data['message']['to']), 1)
        self.assertEqual(data['message']['to'][0]['email'], "to@example.com")
        return data

    def test_send_basic(self):
        """Without MANDRILL_SUBACCOUNT set, no subaccount is sent."""
        data = self._send_and_get_api_data()
        self.assertFalse('subaccount' in data['message'])

    @override_settings(MANDRILL_SUBACCOUNT="test_subaccount")
    def test_send_from_subaccount(self):
        """The MANDRILL_SUBACCOUNT setting is passed as the subaccount."""
        data = self._send_and_get_api_data()
        self.assertEqual(data['message']['subaccount'], "test_subaccount")

    @override_settings(MANDRILL_SUBACCOUNT="global_setting_subaccount")
    def test_subaccount_message_overrides_setting(self):
        """A per-message subaccount attribute overrides the global setting."""
        message = mail.EmailMessage(
            'Subject here', 'Here is the message',
            'from@example.com', ['to@example.com'])
        message.subaccount = "individual_message_subaccount"  # should override global setting
        message.send()
        self.assert_mandrill_called("/messages/send.json")
        data = self.get_api_call_data()
        self.assertEqual(data['message']['subaccount'], "individual_message_subaccount")
| bsd-3-clause |
masayukig/tempest | tempest/tests/lib/services/network/test_qos_client.py | 3 | 4637 | # Copyright (c) 2019 Ericsson
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib.services.network import qos_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestQosClient(base.BaseServiceTest):
    """Unit tests for tempest.lib.services.network.qos_client.QosClient."""

    FAKE_QOS_POLICY_ID = "f1011b08-1297-11e9-a1e7-c7e6825a2616"

    FAKE_QOS_POLICY_REQUEST = {
        'name': 'foo',
        'shared': True
    }

    FAKE_QOS_POLICY_RESPONSE = {
        'policy': {
            "name": "10Mbit",
            "description": "This policy limits the ports to 10Mbit max.",
            "rules": [],
            "id": FAKE_QOS_POLICY_ID,
            "is_default": False,
            "project_id": "8d4c70a21fed4aeba121a1a429ba0d04",
            "revision_number": 1,
            "tenant_id": "8d4c70a21fed4aeba121a1a429ba0d04",
            "created_at": "2018-04-03T21:26:39Z",
            "updated_at": "2018-04-03T21:26:39Z",
            "shared": False,
            "tags": ["tag1,tag2"]
        }
    }

    FAKE_QOS_POLICIES = {
        'policies': [
            FAKE_QOS_POLICY_RESPONSE['policy']
        ]
    }

    def setUp(self):
        super(TestQosClient, self).setUp()
        fake_auth = fake_auth_provider.FakeAuthProvider()
        self.qos_client = qos_client.QosClient(
            fake_auth, "network", "regionOne")

    def _test_create_qos_policy(self, bytes_body=False):
        """Exercise create_qos_policy against a mocked POST."""
        self.check_service_client_function(
            self.qos_client.create_qos_policy,
            "tempest.lib.common.rest_client.RestClient.post",
            self.FAKE_QOS_POLICY_RESPONSE,
            bytes_body,
            201,
            **self.FAKE_QOS_POLICY_REQUEST)

    def _test_list_qos_policies(self, bytes_body=False):
        """Exercise list_qos_policies against a mocked GET."""
        self.check_service_client_function(
            self.qos_client.list_qos_policies,
            "tempest.lib.common.rest_client.RestClient.get",
            self.FAKE_QOS_POLICIES,
            bytes_body,
            200)

    def _test_show_qos_policy(self, bytes_body=False):
        """Exercise show_qos_policy against a mocked GET."""
        self.check_service_client_function(
            self.qos_client.show_qos_policy,
            "tempest.lib.common.rest_client.RestClient.get",
            self.FAKE_QOS_POLICY_RESPONSE,
            bytes_body,
            200,
            qos_policy_id=self.FAKE_QOS_POLICY_ID)

    def _test_update_qos_policy(self, bytes_body=False):
        # Renamed from _test_update_qos_polcy (typo); private helper, both
        # call sites updated below.
        """Exercise update_qos_policy against a mocked PUT."""
        update_kwargs = {
            "name": "100Mbit",
            "description": "This policy limits the ports to 100Mbit max.",
            "shared": True
        }
        resp_body = {
            "policy": copy.deepcopy(
                self.FAKE_QOS_POLICY_RESPONSE['policy']
            )
        }
        resp_body["policy"].update(update_kwargs)
        self.check_service_client_function(
            self.qos_client.update_qos_policy,
            "tempest.lib.common.rest_client.RestClient.put",
            resp_body,
            bytes_body,
            200,
            qos_policy_id=self.FAKE_QOS_POLICY_ID,
            **update_kwargs)

    def test_create_qos_policy_with_str_body(self):
        self._test_create_qos_policy()

    def test_create_qos_policy_with_bytes_body(self):
        self._test_create_qos_policy(bytes_body=True)

    def test_update_qos_policy_with_str_body(self):
        self._test_update_qos_policy()

    def test_update_qos_policy_with_bytes_body(self):
        self._test_update_qos_policy(bytes_body=True)

    def test_show_qos_policy_with_str_body(self):
        self._test_show_qos_policy()

    def test_show_qos_policy_with_bytes_body(self):
        self._test_show_qos_policy(bytes_body=True)

    def test_delete_qos_policy(self):
        self.check_service_client_function(
            self.qos_client.delete_qos_policy,
            "tempest.lib.common.rest_client.RestClient.delete",
            {},
            status=204,
            qos_policy_id=self.FAKE_QOS_POLICY_ID)

    def test_list_qos_policies_with_str_body(self):
        self._test_list_qos_policies()

    def test_list_qos_policies_with_bytes_body(self):
        self._test_list_qos_policies(bytes_body=True)
| apache-2.0 |
ltilve/ChromiumGStreamerBackend | third_party/cython/src/Cython/Plex/Actions.py | 105 | 2361 | #=======================================================================
#
# Python Lexical Analyser
#
# Actions for use in token specifications
#
#=======================================================================
class Action(object):
    """Base class for all Plex lexer actions.

    perform() runs when the associated token is matched; the base
    implementation is an abstract no-op.
    """

    def perform(self, token_stream, text):
        """Run the action.  Subclasses override; the base does nothing."""
        return None  # abstract

    def same_as(self, other):
        """Generic actions compare by identity only."""
        return other is self


class Return(Action):
    """
    Internal Plex action which causes |value| to
    be returned as the value of the associated token
    """

    def __init__(self, value):
        self.value = value

    def perform(self, token_stream, text):
        # The matched text is ignored; the fixed value is the token value.
        return self.value

    def __repr__(self):
        return "Return(%r)" % (self.value,)

    def same_as(self, other):
        return isinstance(other, Return) and other.value == self.value


class Call(Action):
    """
    Internal Plex action which causes a function to be called.
    """

    def __init__(self, function):
        self.function = function

    def perform(self, token_stream, text):
        # Delegate entirely to the wrapped callable.
        return self.function(token_stream, text)

    def __repr__(self):
        return "Call(%s)" % self.function.__name__

    def same_as(self, other):
        return isinstance(other, Call) and other.function is self.function


class Begin(Action):
    """
    Begin(state_name) is a Plex action which causes the Scanner to
    enter the state |state_name|. See the docstring of Plex.Lexicon
    for more information.
    """

    def __init__(self, state_name):
        self.state_name = state_name

    def perform(self, token_stream, text):
        # Switch the scanner into the named state; produces no token value.
        token_stream.begin(self.state_name)

    def __repr__(self):
        return "Begin(%s)" % self.state_name

    def same_as(self, other):
        return isinstance(other, Begin) and other.state_name == self.state_name


class Ignore(Action):
    """
    IGNORE is a Plex action which causes its associated token
    to be ignored. See the docstring of Plex.Lexicon for more
    information.
    """

    def perform(self, token_stream, text):
        return None

    def __repr__(self):
        return "IGNORE"


# Shared singleton used by lexicon specifications.
IGNORE = Ignore()
#IGNORE.__doc__ = Ignore.__doc__


class Text(Action):
    """
    TEXT is a Plex action which causes the text of a token to
    be returned as the value of the token. See the docstring of
    Plex.Lexicon for more information.
    """

    def perform(self, token_stream, text):
        return text

    def __repr__(self):
        return "TEXT"


# Shared singleton used by lexicon specifications.
TEXT = Text()
#TEXT.__doc__ = Text.__doc__
| bsd-3-clause |
labkode/rtlv | handlers.py | 1 | 6340 | from google.appengine.ext.db import BadValueError
from google.appengine.api import channel
from google.appengine.api import users
from google.appengine.ext import ndb
import webapp2
import jinja2
import os
import json
from datetime import datetime
import time
from models import Log
from models import System
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class MainHandler(webapp2.RequestHandler):
    """Landing page: lists every registered system."""

    def get(self):
        current_user = users.get_current_user()
        all_systems = System.query().fetch()
        context = {"systems": all_systems, "user": current_user, "users": users}
        page = JINJA_ENVIRONMENT.get_template("templates/index.html")
        self.response.write(page.render(context))
class SystemHandler(webapp2.RequestHandler):
    """Shows the live log-stream page for one system."""

    def get(self):
        current_user = users.get_current_user()

        def render_not_found(message):
            # Shared "not found" rendering for both error branches below.
            page = JINJA_ENVIRONMENT.get_template("templates/not_found.html")
            context = {"user": current_user, "users": users,
                       "not_found_msg": message}
            self.response.write(page.render(context))

        system_id = self.request.get('system')
        if not system_id:
            render_not_found("Please select a system")
            return

        system = System.get_by_id(system_id)
        if system is None:
            render_not_found("The system #{0} not exists".format(system_id))
            return

        # Log entries are delivered live over the Channel API, so the page
        # starts with an empty list and a channel token for this system.
        #logs = Log.query(ancestor = system.key).fetch()
        context = {"system": system, "logs": [],
                   "token": channel.create_channel(system.key.id()),
                   "user": current_user, "users": users}
        page = JINJA_ENVIRONMENT.get_template("templates/logs.html")
        self.response.write(page.render(context))
class AdminSystemListHandler(webapp2.RequestHandler):
    """Admin page listing every system."""

    def get(self):
        user = users.get_current_user()
        if not user:
            # Bug fix: stop after issuing the login redirect; previously the
            # page body was still rendered for anonymous users.
            self.redirect(users.create_login_url())
            return
        systems = System.query().fetch()
        template_values = {"systems": systems,
                           "message": {"type": "success", "payload": ""},
                           "user": user, "users": users}
        template = JINJA_ENVIRONMENT.get_template("templates/list_system.html")
        self.response.write(template.render(template_values))
        return
class AdminSystemCreateHandler(webapp2.RequestHandler):
    """Admin form (GET) and action (POST) for creating a system."""

    def get(self):
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url())
            return  # bug fix: don't render the form for anonymous users
        template = JINJA_ENVIRONMENT.get_template("templates/create_system.html")
        self.response.write(template.render({"user": user, "users": users}))
        return

    def post(self):
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url())
            return  # bug fix: don't create systems for anonymous users
        system_name = self.request.get("name")
        system_description = self.request.get("description")
        system = System(id = system_name, description = system_description)
        key = system.put()
        # Datastore queries are eventually consistent: poll until the new
        # entity shows up so the rendered list includes it.
        # (Original comment: this is correct but is a hack; the alternative
        # is a sleep().)
        must_stop = False
        systems = []
        while not must_stop:
            systems = System.query().fetch()
            for system in systems:
                if system.key.id() == system_name:
                    must_stop = True
        systems = System.query().fetch()
        template_values = {"systems": systems,
                           "message": {"type": "success",
                                       "payload": "Created system #{0}".format(key.id())},
                           "user": user, "users": users}
        template = JINJA_ENVIRONMENT.get_template("templates/list_system.html")
        self.response.write(template.render(template_values))
        return
class AdminSystemDeleteHandler(webapp2.RequestHandler):
    """Admin action deleting a system."""

    def post(self):
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url())
            return  # bug fix: anonymous users must not delete systems
        system_id = self.request.get("system")
        if not system_id:
            template = JINJA_ENVIRONMENT.get_template("templates/not_found.html")
            template_values = {"user": user, "users": users,
                               "not_found_msg": "Please select a system"}
            self.response.write(template.render(template_values))
            return
        sys = System.get_by_id(system_id)
        if sys is None:
            template = JINJA_ENVIRONMENT.get_template("templates/not_found.html")
            template_values = {"user": user, "users": users,
                               "not_found_msg": "The system #{0} not exists".format(system_id)}
            self.response.write(template.render(template_values))
            return
        sys.key.delete()
        # Eventual consistency: poll until the deleted entity disappears from
        # query results. (Leftover debug print() calls removed.)
        found = True
        systems = []
        while found:
            found = False
            systems = System.query().fetch()
            for system in systems:
                if system.key.id() == sys.key.id():
                    found = True
                    break
        systems = System.query().fetch()
        template_values = {"systems": systems,
                           "message": {"type": "success",
                                       "payload": "Deleted system #{0}".format(system_id)},
                           "user": user, "users": users}
        template = JINJA_ENVIRONMENT.get_template("templates/list_system.html")
        self.response.write(template.render(template_values))
        return
class AdminLogHandler(webapp2.RequestHandler):
    """JSON endpoint receiving log entries and relaying each one over the
    Channel API to pages watching the target system.

    Accepts either a single log object or a list of them. Responds 400 on
    malformed JSON or bad log fields, 404 when the target system is missing.
    """

    def post(self):
        try:
            log_param = json.loads(self.request.body)
        except ValueError as e:
            self.response.out.write(e)
            self.response.set_status(400)
            return
        except:
            # Unexpected decode failure: report a generic server error.
            self.response.set_status(500)
            return
        # Normalize a single log object into a one-element list.
        if not isinstance(log_param, list):
            log_param = [log_param]
        for log_item in log_param:
            log_system = log_item.get("system")
            if not log_system:
                self.response.out.write("System not found")
                self.response.set_status(404)
                # Bug fix: previously fell through and called
                # System.get_by_id(None) before checking the result.
                return
            system = System.get_by_id(log_system)
            if not system:
                self.response.out.write("System not found")
                self.response.set_status(404)
                return
            try:
                log_key = ndb.Key("Log", log_item.get("id"), parent = system.key)
                log_msg = log_item.get("msg")
                log_level = log_item.get("level")
                log_ts = log_item.get("ts")
                log = Log(key = log_key, msg = log_msg, level = log_level, ts = log_ts)
                # CHANNEL API: relay to listeners. The entity itself is never
                # put() — log entries are streamed, not stored.
                channel.send_message(system.key.id(), json.dumps(log.to_dict()))
            except BadValueError as e:
                self.response.out.write(e)
                self.response.set_status(400)
                return
        return
class HelpHandler(webapp2.RequestHandler):
    """Static help page."""

    def get(self):
        context = {"user": users.get_current_user(), "users": users}
        page = JINJA_ENVIRONMENT.get_template("templates/help.html")
        self.response.write(page.render(context))
| agpl-3.0 |
zcth428/hpctoolkit111 | src/tool/hpcrun/sample-sources/make-cuda-wrappers.py | 4 | 11612 | #!/usr/local/bin/python
# -*- python -*-
#
# HPCToolkit MPI Profiler
# this script is adapted from mpiP MPI Profiler ( http://mpip.sourceforge.net/ )
#
# Please see COPYRIGHT AND LICENSE information at the end of this file.
#
#
# make-wrappers.py -- parse the mpi prototype file and generate a
# series of output files, which include the wrappers for profiling
# layer and other data structures.
#
# $Id: make-wrappers.py 442 2010-03-03 17:18:04Z chcham $
#
import sys
import string
import os
import copy
import re
import time
import getopt
import socket
import pdb
# CUDA Driver API entry points whose wrappers are written by hand elsewhere;
# the generator below must not emit automatic wrappers for them.
driverSkipList = [
    'cuCtxCreate_v2',
    'cuCtxDestroy_v2',
    'cuMemcpyHtoD_v2',
    'cuMemcpyDtoH_v2',
    'cuMemcpyHtoDAsync_v2',
    'cuMemcpyDtoHAsync_v2',
    'cuStreamCreate',
    'cuStreamSynchronize',
    'cuStreamDestroy_v2',
    'cuEventSynchronize',
    'cuLaunchGridAsync',
    'cuLaunchKernel']
# CUDA Runtime API entry points whose wrappers are written by hand elsewhere;
# the generator below must not emit automatic wrappers for them.
runtimeSkipList = [
    'cudaDeviceSynchronize',
    'cudaThreadSynchronize',
    'cudaStreamCreate',
    'cudaStreamDestroy',
    'cudaStreamWaitEvent',
    'cudaStreamSynchronize',
    'cudaEventSynchronize',
    'cudaConfigureCall',
    'cudaLaunch',
    'cudaMalloc',
    'cudaMallocArray',
    'cudaFree',
    'cudaFreeArray',
    'cudaMemcpy',
    'cudaMemcpy2D',
    'cudaMemcpyAsync',
    'cudaMemcpyToArray',
    'cudaMemcpyToArrayAsync',
    'cudaMalloc3D',
    'cudaMalloc3DArray',
    'cudaMemcpy3D',
    'cudaMemcpy3DPeer',
    'cudaMemcpy3DAsync',
    'cudaMemcpy3DPeerAsync',
    'cudaMemcpyPeer',
    'cudaMemcpyFromArray',
    'cudaMemcpyArrayToArray',
    'cudaMemcpy2DToArray',
    'cudaMemcpy2DFromArray',
    'cudaMemcpy2DArrayToArray',
    'cudaMemcpyToSymbol',
    'cudaMemcpyFromSymbol',
    'cudaMemcpyPeerAsync',
    'cudaMemcpyFromArrayAsync',
    'cudaMemcpy2DAsync',
    'cudaMemcpy2DToArrayAsync',
    'cudaMemcpy2DFromArrayAsync',
    'cudaMemcpyToSymbolAsync',
    'cudaMemcpyFromSymbolAsync',
    'cudaMemset',
    'cudaMemset2D',
    'cudaMemset3D',
    'cudaMemsetAsync',
    'cudaMemset2DAsync',
    'cudaMemset3DAsync']
def WritecuDriverFunctionPointerTable(file, funcNames):
    """Emit the C source defining cuDriverFunctionPointer[].

    Produces a table like:
        cuDriverFunctionPointer_t cuDriverFunctionPointer[] = {
            {0, "cuStreamCreate"},
            {0, "cuStreamDestroy"},
            ...
        };
    Each entry pairs a NULL real-function pointer with the driver API name
    (element [1] of each tuple in funcNames).
    """
    with open(file, 'w') as fp:
        fp.write('''
// GENERATED FILE DON'T EDIT
#include "gpu_blame-cuda-driver-header.h"
#include<cuda.h>
#include<cuda_runtime_api.h>
''')
        fp.write('cuDriverFunctionPointer_t cuDriverFunctionPointer[] = {\n')
        for entry in funcNames:
            fp.write('\t {{.generic = (void*)0},"' + entry[1] + '"},\n')
        fp.write('};\n')
def WritecuRuntimeFunctionPointerTable(file, funcNames):
    """Emit the C source defining cudaRuntimeFunctionPointer[].

    Produces a table like:
        cudaRuntimeFunctionPointer_t cudaRuntimeFunctionPointer[] = {
            {0, "cudaThreadSynchronize"},
            ...
        };
    Each entry pairs a NULL real-function pointer with the runtime API name
    (element [1] of each tuple in funcNames).
    """
    with open(file, 'w') as fp:
        fp.write('''
// GENERATED FILE DON'T EDIT
#include<cuda.h>
#include<cuda_runtime_api.h>
#include "gpu_blame-cuda-runtime-header.h"
''')
        fp.write('cudaRuntimeFunctionPointer_t cudaRuntimeFunctionPointer[] = {\n')
        for entry in funcNames:
            fp.write('\t {{.generic = (void*)0},"' + entry[1] + '"},\n')
        fp.write('};\n')
def FuncNameToCapitalizedEnum(name):
    """Convert a camelCase API name to an upper-snake identifier.

    E.g. 'cuStreamCreate' -> 'CU_STREAM_CREATE': an underscore is inserted
    before each original uppercase letter and everything is uppercased.
    """
    pieces = []
    for ch in name:
        if ch.isupper():
            pieces.append('_')
        pieces.append(ch.upper())
    return ''.join(pieces)
def FuncNameToEnum(name):
    """Return the generated enum identifier for a wrapped function name."""
    return '%sEnum' % name
def WriteDriverFunctionPointerHeader(file, funcSig):
    """Emit gpu_blame-cuda-driver-header.h.

    Writes a struct type holding one typed real-function pointer per wrapped
    driver API (funcSig tuples are (return type, name, arg list)), an enum
    indexing the table, and the extern declaration of the table itself.
    """
    #Produce struct like this:
    #typedef struct cuDriverFunctionPointer {
    # union {
    # CUresult(*generic) (void);
    # CUresult(*cuStreamCreateReal) (CUstream * phStream, unsigned int Flags);
    # CUresult(*cuStreamDestroyReal) (CUstream hStream);
    # CUresult(*cuStreamSynchronizeReal) (CUstream hStream);
    # CUresult(*cuEventSynchronizeReal) (CUevent event);
    # };
    # const char *functionName;
    #} cuDriverFunctionPointer_t;
    fp = open(file,'w')
    fp.write('''
// GENERATED FILE DON'T EDIT
#ifndef __CU_DRIVER_HEADER_H__
#define __CU_DRIVER_HEADER_H__
#include<cuda.h>
#include<cuda_runtime_api.h>
typedef struct cuDriverFunctionPointer {
union {
void* generic;
''')
    # One typed "<name>Real" member per wrapped function.
    for sig in funcSig:
        fp.write('\t' + sig[0] + '(*' + sig[1] + 'Real) (' + sig[2] + ');\n' )
    fp.write(
''' };
const char *functionName;
} cuDriverFunctionPointer_t;
''')
    # create enum like this:
    #enum cuDriverAPIIndex {
    # cuStreamCreateEnum,
    # cuStreamDestroyEnum,
    # ...
    # CU_MAX_APIS
    #};
    fp.write('''
enum cuDriverAPIIndex {
''')
    for sig in funcSig:
        fp.write('\t' + FuncNameToEnum(sig[1]) + ',\n' )
    fp.write('''
CU_MAX_APIS
};
extern cuDriverFunctionPointer_t cuDriverFunctionPointer[CU_MAX_APIS];
''')
    fp.write('#endif\n')
    fp.close();
def WriteRuntimeFunctionPointerHeader(file, funcSig):
    """Emit gpu_blame-cuda-runtime-header.h.

    Runtime-API counterpart of WriteDriverFunctionPointerHeader: a struct of
    typed real-function pointers, an index enum, and the extern table
    declaration.
    """
    #Produce struct like this:
    #typedef struct cudaRuntimeFunctionPointer {
    # union {
    # cudaError_t(*generic) (void);
    # cudaError_t(*cudaThreadSynchronizeReal) (void);
    # };
    # const char *functionName;
    #} cudaRuntimeFunctionPointer_t;
    fp = open(file,'w')
    fp.write('''
// GENERATED FILE DON'T EDIT
#ifndef __CUDA_RUNTIME_HEADER_H__
#define __CUDA_RUNTIME_HEADER_H__
#include<cuda.h>
#include<cuda_runtime_api.h>
typedef struct cudaRuntimeFunctionPointer {
union {
void* generic;
''')
    # One typed "<name>Real" member per wrapped function.
    for sig in funcSig:
        fp.write('\t' + sig[0] + '(*' + sig[1] + 'Real) (' + sig[2] + ');\n' )
    fp.write(
''' };
const char *functionName;
} cudaRuntimeFunctionPointer_t;
''')
    # create enum like this:
    #enum cudaRuntimeAPIIndex {
    # cudaThreadSynchronizeEnum,
    # cudaStreamSynchronizeEnum,
    # cudaDeviceSynchronizeEnum,
    # ...
    # CUDA_MAX_APIS
    #};
    fp.write('''
enum cudaRuntimeAPIIndex{
''')
    for sig in funcSig:
        fp.write('\t' + FuncNameToEnum(sig[1]) + ',\n' )
    fp.write('''
CUDA_MAX_APIS
};
extern cudaRuntimeFunctionPointer_t cudaRuntimeFunctionPointer[CUDA_MAX_APIS];
''')
    fp.write('#endif\n')
    fp.close();
def WriteDriverFunctionWrapper(file, funcSig):
    """Emit the C wrappers for the CUDA Driver API.

    For every signature not in driverSkipList, writes a wrapper that (a)
    falls straight through to the real function when it is not safe to sync,
    otherwise (b) marks the thread as being at a CUDA sync point, disables
    new-thread monitoring, calls the real function, and restores state.
    """
    fp = open(file,'w')
    fp.write('''
// GENERATED FILE DON'T EDIT
#include <stdbool.h>
#include <hpcrun/thread_data.h>
#include <monitor.h>
#include<cuda.h>
#include "gpu_blame-cuda-driver-header.h"
extern bool hpcrun_is_safe_to_sync(const char* fn);
''')
    for sig in funcSig:
        #skip the manually done ones
        if sig[1] in driverSkipList: continue
        fp.write('\t' + sig[0] + sig[1] + ' (' + sig[2] + ') {\n' )
        # Fast path: forward directly when syncing is unsafe.
        fp.write('if (! hpcrun_is_safe_to_sync(__func__)) {')
        fp.write(' return cuDriverFunctionPointer[' +FuncNameToEnum(sig[1]) + '].' + sig[1] + 'Real(')
        args = sig[2].split(',')
        first = True
        for argTypeName in args:
            if not first:
                fp.write(', ')
            else:
                first = False
            # Parameter name = last whitespace token, stripped of any '*'
            # attached to the name rather than the type.
            param = argTypeName.split()[-1].split('*')[-1]
            if param.strip() != "void":
                fp.write(param)
        fp.write( ');\n')
        fp.write('}\n')
        # Slow path: bracket the real call with sync-point bookkeeping.
        fp.write('TD_GET(gpu_data.is_thread_at_cuda_sync) = true;\n')
        fp.write('monitor_disable_new_threads();\n')
        #fp.write('printf("\\n%s on","' +sig[1] +'");fflush(stdout);')
        fp.write('CUresult ret = cuDriverFunctionPointer[' +FuncNameToEnum(sig[1]) + '].' + sig[1] + 'Real(')
        args = sig[2].split(',')
        first = True
        for argTypeName in args:
            if not first:
                fp.write(', ')
            else:
                first = False
            param = argTypeName.split()[-1].split('*')[-1]
            if param.strip() != "void":
                fp.write(param)
        fp.write( ');\n')
        fp.write('monitor_enable_new_threads();\n')
        fp.write('TD_GET(gpu_data.is_thread_at_cuda_sync) = false;\n')
        #fp.write('printf("\\n%s off","' +sig[1] +'");fflush(stdout);')
        fp.write('return ret;\n')
        fp.write('}\n')
    # fp.write('''
    ##endif
    #''')
    fp.close();
def WriteRuntimeFunctionWrapper(file, funcSig):
    """Emit the C wrappers for the CUDA Runtime API.

    Runtime-API counterpart of WriteDriverFunctionWrapper: skips names in
    runtimeSkipList and brackets each real call with sync-point bookkeeping
    unless it is unsafe to sync.
    """
    fp = open(file,'w')
    fp.write('''
// GENERATED FILE DON'T EDIT
#include <stdbool.h>
#include <hpcrun/thread_data.h>
#include <monitor.h>
#include<cuda.h>
#include<cuda_runtime_api.h>
#include "gpu_blame-cuda-runtime-header.h"
extern bool hpcrun_is_safe_to_sync(const char* fn);
''')
    for sig in funcSig:
        #skip the manually done ones
        if sig[1] in runtimeSkipList: continue
        fp.write('\t' + sig[0] + sig[1] + ' (' + sig[2] + ') {\n' )
        # Fast path: forward directly when syncing is unsafe.
        fp.write('if (! hpcrun_is_safe_to_sync(__func__)) {')
        fp.write(' return cudaRuntimeFunctionPointer[' +FuncNameToEnum(sig[1]) + '].' + sig[1] + 'Real(')
        args = sig[2].split(',')
        first = True
        for argTypeName in args:
            if not first:
                fp.write(', ')
            else:
                first = False
            # Parameter name = last whitespace token, stripped of any '*'
            # attached to the name rather than the type.
            param = argTypeName.split()[-1].split('*')[-1]
            if param.strip() != "void":
                fp.write(param)
        fp.write( ');\n')
        fp.write('}\n')
        # Slow path: bracket the real call with sync-point bookkeeping.
        fp.write('TD_GET(gpu_data.is_thread_at_cuda_sync) = true;\n')
        fp.write('monitor_disable_new_threads();\n')
        #fp.write('printf("\\n%s on","' +sig[1] +'");')
        fp.write('cudaError_t ret = cudaRuntimeFunctionPointer[' +FuncNameToEnum(sig[1]) + '].' + sig[1] + 'Real(')
        args = sig[2].split(',')
        first = True
        for argTypeName in args:
            if not first:
                fp.write(', ')
            else:
                first = False
            param = argTypeName.split()[-1].split('*')[-1]
            if param.strip() != "void":
                fp.write(param)
        fp.write( ');\n')
        fp.write('monitor_enable_new_threads();\n')
        fp.write('TD_GET(gpu_data.is_thread_at_cuda_sync) = false;\n')
        #fp.write('printf("\\n%s off","' +sig[1] +'");')
        fp.write('return ret;\n')
        fp.write('}\n')
    fp.close();
# Regex patterns matching driver-API ("cu*") and runtime-API ("cuda*")
# prototypes in a CUDA header: group(1) = return type, group(2) = function
# name, group(3) = argument list. The commented variants matched headers
# that still carried CUDAAPI/CUDARTAPI/__host__ decorations.
#cuPattern = '\s*(CUresult[\s\n]+)(CUDAAPI[\s\n]+)(cu[a-zA-Z0-9_]*[\s\n]*)\(([^;]*)\)[\s\n]*;'
#cudaPattern = '\s*extern[\s\n]+__host__[\s\n]+(cudaError_t[\s\n]+)(CUDARTAPI[\s\n]+)(cuda[a-zA-Z0-9_]*[\s\n]*)\(([^;]*)\)[\s\n]*;'
cuPattern = '\s*(CUresult[\s\n]+)(cu[a-zA-Z0-9_]*[\s\n]*)\(([^;]*)\)[\s\n]*;'
cudaPattern = '\s*extern[\s\n]+(cudaError_t[\s\n]+)(cuda[a-zA-Z0-9_]*[\s\n]*)\(([^;]*)\)[\s\n]*;'

# Command line: <driver|runtime> <cuda header> <out header> <out table> <out wrappers>
inFile = open(sys.argv[2]).read()
generatedHeaderFile = sys.argv[3]
generatedTableFile = sys.argv[4]
generatedWrapperFile = sys.argv[5]
if sys.argv[1] == 'driver':
    lines = re.finditer(cuPattern,inFile, re.MULTILINE)
elif sys.argv[1] == 'runtime':
    lines = re.finditer(cudaPattern,inFile, re.MULTILINE)
else:
    # Python 2 print statement — this script targets Python 2.
    print 'Invalid pattern'
    exit(-1)
# Strips CUDA default-argument markers, e.g. "__dv(0)", from prototypes.
defaultValue = re.compile('__dv\s*\(.*\)')
signatures = []
for line in lines:
    funcName = line.group(2)
    funcPrefix = line.group(1)
    funcArgs = line.group(3)
    noDefaultArgs = defaultValue.sub('',funcArgs)
    #print p.group(1), p.group(2), p.group(3), p.group(4), p.group(5), '(', n, ')'
    args = noDefaultArgs.split(',')
    #print funcPrefix, funcName, '(' , noDefaultArgs, ')'
    # NOTE(review): this inner loop only computes 'last' for the (commented)
    # debug print and has no effect — looks like a debugging leftover.
    for argTypeName in args:
        last = argTypeName.split()[-1]
        last = last.split('*')[-1]
        #print last
    signatures.append((funcPrefix, funcName, noDefaultArgs))
# Generate the header, pointer table, and wrapper source for the chosen API.
if sys.argv[1] == 'driver':
    WriteDriverFunctionPointerHeader(generatedHeaderFile, signatures)
    WritecuDriverFunctionPointerTable(generatedTableFile, signatures)
    WriteDriverFunctionWrapper(generatedWrapperFile, signatures)
elif sys.argv[1] == 'runtime':
    WriteRuntimeFunctionPointerHeader(generatedHeaderFile, signatures)
    WritecuRuntimeFunctionPointerTable(generatedTableFile, signatures)
    WriteRuntimeFunctionWrapper(generatedWrapperFile, signatures)
| bsd-3-clause |
incuna/authentic | authentic2/auth2_auth/auth2_openid/views.py | 1 | 17205 | import urllib
from django_authopenid.forms import OpenidDissociateForm, AssociateOpenID
from django_authopenid.forms import OpenidSigninForm
from django_authopenid import DjangoOpenIDStore
from django_authopenid.models import UserAssociation
from django_authopenid.utils import *
from django_authopenid.views import associate_failure, complete
from django_authopenid.views import _build_context, signin_failure, not_authenticated
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import REDIRECT_FIELD_NAME, login
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.encoding import smart_unicode
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.views.generic.simple import redirect_to
from django.contrib import messages
from openid.consumer.consumer import Consumer, SUCCESS, CANCEL, FAILURE, SETUP_NEEDED
from openid.consumer.discover import DiscoveryFailure
from openid.yadis import xri
from authentic2.auth2_auth.auth2_openid import *
# Known OpenID provider URL patterns; empty host segments ('.') mark where a
# user-specific identifier is inserted.
# Bug fix: two list entries were missing a trailing comma, so adjacent string
# literals were silently merged by implicit concatenation ('...wordpress.com/'
# + 'http://.blogspot.com/' and '...myvidoop.com/' +
# 'http://.pip.verisignlabs.com/').
OPENID_PROVIDER = [
    'https://me.yahoo.com//',
    'http://openid.aol.com/',
    'http://.myopenid.com/',
    'http://.livejournal.com/',
    'http://www.flickr.com/photos//',
    'http://.wordpress.com/',
    'http://.blogspot.com/',
    'http://.pip.verisignlabs.com/',
    'http://.myvidoop.com/',
    'http://.pip.verisignlabs.com/',
    'http://claimid.com/',
]
def signin_success(request, identity_url, openid_response,
                   redirect_field_name=REDIRECT_FIELD_NAME, **kwargs):
    """
    openid signin success.

    If the openid is already registered, the user is redirected to
    url set par next or in settings with OPENID_REDIRECT_NEXT variable.
    If none of these urls are set user is redirectd to /.
    if openid isn't registered user is redirected to register page.
    """
    # Stash the verified OpenID (and the running list) in the session so the
    # registration flow can pick it up later.
    openid_ = from_openid_response(openid_response)
    openids = request.session.get('openids', [])
    openids.append(openid_)
    request.session['openids'] = openids
    request.session['openid'] = openid_
    redirect_to = request.REQUEST.get(redirect_field_name, '')
    try:
        rel = UserAssociation.objects.get(openid_url__exact = str(openid_))
    # NOTE(review): bare except treats *any* failure (including programming
    # errors) as "unknown OpenID". Presumably only
    # UserAssociation.DoesNotExist is intended — confirm before narrowing.
    except:
        # try to register this new user
        if not redirect_to: # or '//' in redirect_to or ' ' in redirect_to:
            redirect_to = settings.LOGIN_REDIRECT_URL
        params = urllib.urlencode({ redirect_field_name: redirect_to })
        redirect_to = "%s?%s" % (reverse('user_register'), params)
        return HttpResponseRedirect(redirect_to)
    user_ = rel.user
    if user_.is_active:
        # Force the model backend so login() accepts a user fetched outside
        # of authenticate().
        user_.backend = "django.contrib.auth.backends.ModelBackend"
        login(request, user_)
    if not redirect_to: # or '//' in redirect_to or ' ' in redirect_to:
        redirect_to = settings.LOGIN_REDIRECT_URL
    return HttpResponseRedirect(redirect_to)
def mycomplete(request, on_success=None, on_failure=None, return_to=None,
               **kwargs):
    """Finish an OpenID authentication round-trip.

    Feeds the provider's redirect parameters to the OpenID consumer, fires
    the auth_oidlogin signal with the claimed identity URL and outcome, then
    dispatches to on_success / on_failure depending on the response status.
    """
    on_success = on_success or default_on_success
    on_failure = on_failure or default_on_failure
    consumer = Consumer(request.session, DjangoOpenIDStore())
    # make sure params are encoded in utf8
    params = dict((k,smart_unicode(v)) for k, v in request.GET.items())
    openid_response = consumer.complete(params, return_to)
    # Bug fix: request.GET is a mapping, so key presence must be tested with
    # 'in'. The previous hasattr(request.GET, 'openid.identity') looked for
    # an *attribute* of that name, which never exists, so _openid_url was
    # always reported as 'None' in the signals below.
    if 'openid.identity' not in request.GET:
        _openid_url = 'None'
    else:
        _openid_url = request.GET['openid.identity']
    if openid_response.status == SUCCESS:
        auth_oidlogin.send(sender = None, openid_url = _openid_url, state = 'success')
        return on_success(request, openid_response.identity_url,
            openid_response, **kwargs)
    elif openid_response.status == CANCEL:
        auth_oidlogin.send(sender = None, openid_url = _openid_url, state = 'cancel')
        return on_failure(request, 'The request was canceled', **kwargs)
    elif openid_response.status == FAILURE:
        auth_oidlogin.send(sender = None, openid_url = _openid_url, state = 'failure')
        return on_failure(request, openid_response.message, **kwargs)
    elif openid_response.status == SETUP_NEEDED:
        auth_oidlogin.send(sender = None, openid_url = _openid_url, state = 'setup_needed')
        return on_failure(request, 'Setup needed', **kwargs)
    else:
        assert False, "Bad openid status: %s" % openid_response.status
@csrf_exempt
def complete_signin(request, redirect_field_name=REDIRECT_FIELD_NAME,
                    openid_form=OpenidSigninForm, auth_form=AuthenticationForm,
                    on_success=signin_success, on_failure=signin_failure,
                    extra_context=None):
    """Endpoint hit by the provider's redirect; delegates to mycomplete()."""
    return_url = get_url_host(request) + reverse('user_complete_signin')
    return mycomplete(
        request, on_success, on_failure, return_url,
        redirect_field_name=redirect_field_name,
        openid_form=openid_form,
        auth_form=auth_form,
        extra_context=extra_context,
    )
def ask_openid(request, openid_url, redirect_to, on_failure=None):
    """Start an OpenID authentication request and redirect the user
    agent to the provider's endpoint.

    Discovers the provider for ``openid_url``, attaches SReg and/or AX
    attribute-exchange extensions when the endpoint advertises them
    (always requesting nickname and email), and returns a redirect to
    the provider's authentication URL.

    :param request: current ``HttpRequest``.
    :param openid_url: OpenID identifier entered by the user.
    :param redirect_to: absolute return-to URL the provider calls back.
    :param on_failure: ``callable(request, message)`` invoked on error
        (defaults to ``signin_failure``).
    """
    on_failure = on_failure or signin_failure
    sreg_req = None
    ax_req = None
    _openid_url = openid_url
    trust_root = getattr(
        settings, 'OPENID_TRUST_ROOT', get_url_host(request) + '/'
    )
    # i-names (XRI identifiers) can be disabled site-wide.
    if xri.identifierScheme(openid_url) == 'XRI' and getattr(
        settings, 'OPENID_DISALLOW_INAMES', False
    ):
        msg = ("i-names are not supported")
        auth_oidlogin.send(sender=None, openid_url=_openid_url,
                           state='not_supported')
        return on_failure(request, msg)
    consumer = Consumer(request.session, DjangoOpenIDStore())
    try:
        auth_request = consumer.begin(openid_url)
    except DiscoveryFailure:
        msg = ("The OpenID %s was invalid") % openid_url
        auth_oidlogin.send(sender=None, openid_url=_openid_url,
                           state='invalid')
        return on_failure(request, msg)
    # Ask the endpoint which attribute-exchange flavors it supports.
    use_ax, use_sreg = discover_extensions(openid_url)
    if use_sreg:
        # Set sreg extension; we always ask for nickname and email.
        sreg_attrs = getattr(settings, 'OPENID_SREG', {})
        sreg_attrs.update({ "optional": ['nickname', 'email'] })
        sreg_req = sreg.SRegRequest(**sreg_attrs)
    if use_ax:
        # Set ax extension; we always ask for nickname and email.
        ax_req = ax.FetchRequest()
        ax_req.add(ax.AttrInfo('http://schema.openid.net/contact/email',
                               alias='email', required=True))
        ax_req.add(ax.AttrInfo('http://schema.openid.net/namePerson/friendly',
                               alias='nickname', required=True))
        # Add custom ax attrs; each entry is either (type_uri, required)
        # or a 1-tuple (type_uri,).
        ax_attrs = getattr(settings, 'OPENID_AX', [])
        for attr in ax_attrs:
            if len(attr) == 2:
                # BUG FIX: was ``required=alias[1]`` — ``alias`` is
                # undefined here (NameError); the flag lives in attr[1].
                ax_req.add(ax.AttrInfo(attr[0], required=attr[1]))
            else:
                ax_req.add(ax.AttrInfo(attr[0]))
    if sreg_req is not None:
        auth_request.addExtension(sreg_req)
    if ax_req is not None:
        auth_request.addExtension(ax_req)
    redirect_url = auth_request.redirectURL(trust_root, redirect_to)
    return HttpResponseRedirect(redirect_url)
@csrf_exempt
@not_authenticated
def signin(request, template_name='authopenid/signin.html',
        redirect_field_name=REDIRECT_FIELD_NAME, openid_form=OpenidSigninForm,
        auth_form=AuthenticationForm, on_failure=None, extra_context=None):
    """Display and process the combined OpenID / password sign-in page."""
    if on_failure is None:
        on_failure = signin_failure
    redirect_to = request.REQUEST.get(redirect_field_name, '')
    openid_bound = openid_form()
    auth_bound = auth_form()
    if request.POST:
        # Reject empty, absolute ('://') or space-containing targets to
        # avoid open redirects; fall back to the configured landing page.
        if not redirect_to or '://' in redirect_to or ' ' in redirect_to:
            redirect_to = settings.LOGIN_REDIRECT_URL
        if 'openid_url' in request.POST.keys():
            openid_bound = openid_form(data=request.POST)
            if openid_bound.is_valid():
                next_query = urllib.urlencode(
                    { redirect_field_name: redirect_to })
                redirect_url = "%s%s?%s" % (get_url_host(request),
                                            reverse('user_complete_signin'),
                                            next_query)
                return ask_openid(request,
                                  openid_bound.cleaned_data['openid_url'],
                                  redirect_url,
                                  on_failure=on_failure)
        else:
            # Plain Django username/password authentication.
            auth_bound = auth_form(data=request.POST)
            if auth_bound.is_valid():
                login(request, auth_bound.get_user())
                if request.session.test_cookie_worked():
                    request.session.delete_test_cookie()
                return HttpResponseRedirect(redirect_to)
    # GET, or POST with an invalid form: re-render with bound forms.
    return render_to_response(template_name, {
        'form1': openid_bound,
        'form2': auth_bound,
        redirect_field_name: redirect_to,
        'msg': request.GET.get('msg','')
    }, context_instance=_build_context(request, extra_context=extra_context))
@csrf_exempt
@login_required
def dissociate(request, template_name="authopenid/dissociate.html",
        dissociate_form=OpenidDissociateForm,
        redirect_field_name=REDIRECT_FIELD_NAME,
        default_redirect=settings.LOGIN_REDIRECT_URL, extra_context=None):
    """Remove one or more OpenIDs from the current user's account.

    Refuses to drop the last remaining OpenID while the account has no
    usable password (the user would otherwise be locked out).

    NOTE(review): ``template_name``, ``dissociate_form``,
    ``redirect_field_name`` and ``default_redirect`` are accepted but
    never used, and a plain GET (no POST data) falls through and
    returns ``None`` — presumably this view is only ever POSTed to;
    confirm against the urlconf before relying on GET.
    """
    nb_associated_openids, associated_openids = get_associate_openid(request.user)
    # Guard: removing the only OpenID of a password-less account.
    if nb_associated_openids == 1 and not request.user.has_usable_password() and request.method != 'GET':
        msg = ["You can't remove this openid, you should set a password first."]
        return render_to_response("authopenid/associate.html",{
            'associated_openids' : associated_openids ,
            'nb_associated_openids':nb_associated_openids,
            'msg': msg},
            context_instance = RequestContext(request)
        )
    if request.POST:
        # User backed out of the confirmation page.
        if request.POST.get('bdissociate_cancel','') == 'Cancel':
            msg = ['Operation Cancel.']
            return redirect_to(request,'/accounts/openid/associate/')
        openid_urls = request.POST.getlist('a_openids_remove')
        if len(openid_urls) >= 1:
            for openid_url in openid_urls:
                UserAssociation.objects.get(openid_url__exact=openid_url).delete()
                # Drop the session marker if the removed OpenID is the
                # one the user signed in with.
                if openid_url == request.session.get('openid_url'):
                    del request.session['openid_url']
            msg = "Openid removed."
            request.user.message_set.create(message = msg)
            return redirect_to(request,'/accounts/openid/associate')
        else:
            # Nothing selected: bounce back to the association page.
            return redirect_to(request, '/accounts/openid/associate')
@login_required
def associate(request, template_name='authopenid/associate.html',
        openid_form=AssociateOpenID, redirect_field_name='/',
        on_failure=associate_failure, extra_context=None):
    """Manage the OpenIDs associated with the logged-in user.

    GET renders the association page. POST either starts the removal
    flow for the selected OpenIDs (``a_openids`` present) or validates
    a new OpenID url and redirects to the provider to associate it.
    """
    nb_associated_openids, associated_openids = get_associate_openid(request.user)
    redirect_to = request.REQUEST.get(redirect_field_name, '')
    if request.POST:
        if 'a_openids' in request.POST.keys():
            a_openids = []
            # BUG FIX: was ``is not ''`` — identity comparison against a
            # string literal (SyntaxWarning on modern Pythons); value
            # equality is what is meant here.
            if request.POST.get('a_openids','') != '':
                a_openids = request.POST.getlist('a_openids')
            # Refuse to remove every OpenID while the account has no
            # usable password: the user would be locked out.
            if len(a_openids) == nb_associated_openids and not request.user.has_usable_password():
                if len(a_openids) > 1:
                    msg = ["You can't remove these openids, You should set a password first."]
                else:
                    msg = ["You can't remove this openid, You should set a password first."]
                return render_to_response('authopenid/associate.html', {
                    redirect_field_name: redirect_to,
                    'associated_openids' : associated_openids,
                    'nb_associated_openids' : nb_associated_openids,
                    'msg':msg,
                }, context_instance=_build_context(request, extra_context=extra_context))
            # Show the confirmation page of the dissociate flow.
            return render_to_response("authopenid/dissociate.html",{
                'a_openids' : a_openids },
                context_instance = RequestContext(request)
            )
        else:
            form = openid_form(request.user, data=request.POST)
            if form.is_valid():
                # Reject urls with spaces or bare provider names.
                if ' ' in form.cleaned_data['openid_url'] or form.cleaned_data['openid_url'] in OPENID_PROVIDER:
                    msg = ['You must enter a valid OpenID url']
                    return render_to_response('authopenid/associate.html', {
                        redirect_field_name: redirect_to,
                        'associated_openids' : associated_openids,
                        'nb_associated_openids' : nb_associated_openids,
                        'msg':msg,
                    }, context_instance=_build_context(request, extra_context=extra_context))
                # Sanitize the post-association redirect target.
                if not redirect_to or '://' in redirect_to or ' ' in redirect_to:
                    redirect_to = settings.LOGIN_REDIRECT_URL
                redirect_url = "%s%s?%s" % (
                    get_url_host(request),
                    reverse('user_complete_myassociate'),
                    urllib.urlencode({ redirect_field_name: redirect_to })
                )
                return ask_openid(request,
                        form.cleaned_data['openid_url'],
                        redirect_url,
                        on_failure=on_failure)
            else:
                msg = ['You must enter a valid OpenID url']
                return render_to_response('authopenid/associate.html', {
                    redirect_field_name: redirect_to,
                    'associated_openids' : associated_openids,
                    'nb_associated_openids' : nb_associated_openids,
                    'msg':msg,
                }, context_instance=_build_context(request, extra_context=extra_context))
    else:
        form = openid_form(request.user)
    msg = messages.get_messages(request)
    return render_to_response('authopenid/associate.html', {
        'form': form,
        redirect_field_name: redirect_to,
        'associated_openids' : associated_openids,
        'nb_associated_openids' : nb_associated_openids,
        'msg':msg,
    }, context_instance=_build_context(request, extra_context=extra_context))
@login_required
def associate_success(request, identity_url, openid_response,
        redirect_field_name=REDIRECT_FIELD_NAME, send_email=True, **kwargs):
    """Callback run after the provider confirmed an association request.

    Records the fresh OpenID in the session, persists a
    ``UserAssociation`` row for the current user, then re-renders the
    association page with a confirmation message.
    """
    openid_ = from_openid_response(openid_response)
    # Keep the session-side list of authenticated OpenIDs up to date.
    openids = request.session.get('openids', [])
    openids.append(openid_)
    request.session['openids'] = openids
    uassoc = UserAssociation(
        openid_url=str(openid_),
        user_id=request.user.id
    )
    uassoc.save(send_email=send_email)
    # (Removed a dead ``redirect_to`` assignment that was never read.)
    nb_associated_openids, associated_openids = get_associate_openid(request.user)
    msg = ["Your Openid has been added"]
    return render_to_response("authopenid/associate.html",{
        'associated_openids' : associated_openids ,
        'nb_associated_openids':nb_associated_openids,
        'msg': msg},
        context_instance = RequestContext(request)
    )
@csrf_exempt
@login_required
def complete_associate(request, redirect_field_name=REDIRECT_FIELD_NAME,
        template_failure='authopenid/associate.html',
        openid_form=AssociateOpenID, redirect_name=None,
        on_success=associate_success, on_failure=associate_failure,
        send_email=True, extra_context=None):
    """Return-URL endpoint for the "associate an OpenID" flow.

    GET completes the OpenID round-trip; anything else is treated as a
    (re)submission of the association form.
    """
    if request.method != 'GET':
        return associate(request, template_name='authopenid/associate.html',
                         openid_form=AssociateOpenID, redirect_field_name='/',
                         on_failure=associate_failure, extra_context=None)
    return_to = get_url_host(request) + reverse('user_complete_myassociate')
    return mycomplete(request, on_success, on_failure, return_to,
                      redirect_field_name=redirect_field_name,
                      openid_form=openid_form,
                      template_failure=template_failure,
                      redirect_name=redirect_name,
                      send_email=send_email, extra_context=extra_context)
def get_associate_openid(user):
    """Return ``(count, urls)`` for the OpenIDs linked to *user*."""
    urls = []
    for association in UserAssociation.objects.filter(user=user):
        urls.append(association.openid_url)
    return len(urls), urls
def openid_profile(request, next, template_name='auth/openid_profile.html'):
    """Render the OpenID section of a user's profile page to a string."""
    _, associated_openids = get_associate_openid(request.user)
    context = {
        'idp_openid': getattr(settings, 'IDP_OPENID', False),
        'associated_openids': associated_openids,
    }
    return render_to_string(template_name, context, RequestContext(request))
| agpl-3.0 |
swiftix/swift.old | docs/conf.py | 9 | 9024 | # -*- coding: utf-8 -*-
#
# Swift documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'Swift'
copyright = u'2015, Apple Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.2'
# The full version, including alpha/beta/rc tags.
release = '2.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# Default language for literal code blocks; resolved by the Swift lexer
# monkeypatch installed at the bottom of this file.
highlight_language = 'swift'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
html_style = 'swift.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    # Red links are a bit too garish
    "linkcolor" : "#577492",
    "visitedlinkcolor" : "#577492",
    "hoverlinkcolor" : "#551A8B"
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {'LangRef': 'archive/LangRef.html'}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Swiftdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('contents', 'Swift.tex', u'Swift Documentation',
   u'LLVM project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('contents', 'Swift', u'Swift Documentation',
     [u'LLVM project'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('contents', 'Swift', u'Swift Documentation',
   u'LLVM project', 'Swift', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# FIXME: Define intersphinx configuration.
intersphinx_mapping = {}
# -- Options for extensions ----------------------------------------------------
# Enable this if you want TODOs to show up in the generated documentation.
todo_include_todos = True
#
# Monkeypatch pygments so it will know about the Swift lexers
#
# Pull in the Swift lexers
from os.path import dirname, abspath, join as join_paths
# Temporarily put <repo>/utils/pygments first on sys.path so the local
# 'swift' lexer module wins over any installed package of the same name.
sys.path = [
    join_paths(dirname(dirname(abspath(__file__))), 'utils', 'pygments')
] + sys.path
import swift as swift_pygments_lexers
sys.path.pop(0)
# Monkeypatch pygments.lexers.get_lexer_by_name to return our lexers
from pygments.lexers import get_lexer_by_name as original_get_lexer_by_name
def swift_get_lexer_by_name(_alias, *args, **kw):
    """Resolve 'swift' and 'swift-console' to the in-tree lexers.

    Every other alias is delegated to the stock pygments resolver.
    """
    local_lexers = {
        'swift': swift_pygments_lexers.SwiftLexer,
        'swift-console': swift_pygments_lexers.SwiftConsoleLexer,
    }
    lexer_class = local_lexers.get(_alias)
    if lexer_class is not None:
        return lexer_class()
    return original_get_lexer_by_name(_alias, *args, **kw)
import pygments.lexers
# Install the Swift-aware resolver in place of the stock pygments one so
# Sphinx code-block highlighting picks up the custom lexers.
pygments.lexers.get_lexer_by_name = swift_get_lexer_by_name
| apache-2.0 |
kaarl/pyload | module/plugins/crypter/FilecryptCc.py | 2 | 7916 | # -*- coding: utf-8 -*-
#
# Test links:
# http://filecrypt.cc/Container/64E039F859.html
import binascii
import re
import urlparse
import Crypto.Cipher.AES
from module.plugins.internal.Crypter import Crypter
from module.plugins.captcha.ReCaptcha import ReCaptcha
from module.plugins.captcha.SolveMedia import SolveMedia
class FilecryptCc(Crypter):
    """Decrypter for filecrypt.cc link containers.

    Fetches a container page, deals with password protection and the
    various captcha flavors the site serves, then extracts download
    links from CNL (Click'n'Load), plain web links and DLC containers.
    """
    __name__ = "FilecryptCc"
    __type__ = "crypter"
    __version__ = "0.26"
    __status__ = "testing"
    __pattern__ = r'https?://(?:www\.)?filecrypt\.cc/Container/\w+'
    __config__ = [("activated", "bool", "Activated", True)]
    __description__ = """Filecrypt.cc decrypter plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zapp-brannigan", "fuerst.reinje@web.de" ),
                   ("GammaC0de" , "nitzo2001[AT]yahoo[DOT]com")]
    # URL_REPLACEMENTS = [(r'.html$', ""), (r'$', ".html")] #@TODO: Extend SimpleCrypter
    COOKIES = [("filecrypt.cc", "lang", "en")]
    # Regexes matched against the (captcha-solved) container page.
    DLC_LINK_PATTERN = r'onclick="DownloadDLC\(\'(.+)\'\);">'
    WEBLINK_PATTERN = r"openLink.?'([\w\-]*)',"
    CAPTCHA_PATTERN = r'<h2>Security prompt</h2>'
    INTERNAL_CAPTCHA_PATTERN = r'<img id="nc" src="(.+?)"'
    CIRCLE_CAPTCHA_PATTERN = r'<input type="image" src="(.+?)"'
    KEY_CAPTCHA_PATTERN = r"<script language=JavaScript src='(http://backs\.keycaptcha\.com/swfs/cap\.js)'"
    SOLVE_MEDIA_PATTERN = r'<script type="text/javascript" src="(http://api\.solvemedia\.com/papi/challenge.+?)"'
    MIRROR_PAGE_PATTERN = r'"[\w]*" href="(https?://(?:www\.)?filecrypt.cc/Container/\w+\.html\?mirror=\d+)">'
    def setup(self):
        """Reset per-container state: the collected download URLs."""
        self.urls = []
    def decrypt(self, pyfile):
        """Main entry point: fetch the container, clear password and
        captcha gates, then harvest links from all supported sources.
        """
        self.data = self.load(pyfile.url)
        if "content notfound" in self.data:  #@NOTE: "content notfound" is NOT a typo
            self.offline()
        self.handle_password_protection()
        self.handle_captcha()
        self.handle_mirror_pages()
        for handle in (self.handle_CNL, self.handle_weblinks, self.handle_dlc_container):
            handle()
        if self.urls:
            self.packages = [(pyfile.package().name, self.urls, pyfile.package().name)]
        return
    def handle_mirror_pages(self):
        """Append the markup of every mirror page to the working page
        so the link extractors see all mirrors at once.
        """
        if "mirror=" not in self.site_with_links:
            return
        mirror = re.findall(self.MIRROR_PAGE_PATTERN, self.site_with_links)
        self.log_info(_("Found %d mirrors") % len(mirror))
        # The first entry is the page we already have.
        for i in mirror[1:]:
            self.site_with_links = self.site_with_links + self.load(i)
    def handle_password_protection(self):
        """Submit the package password if the container asks for one."""
        if '<input type="text" name="password"' not in self.data:
            return
        self.log_info(_("Folder is password protected"))
        password = self.get_password()
        if not password:
            self.fail(_("Please enter the password in package section and try again"))
        self.data = self.load(self.pyfile.url, post={'password': password})
    def handle_captcha(self):
        """Detect and solve whichever captcha flavor the page shows.

        Sets ``self.site_with_links`` to the unlocked page markup.
        KeyCaptcha is not supported and triggers a retry.
        """
        if re.search(self.CAPTCHA_PATTERN, self.data):
            m1 = re.search(self.INTERNAL_CAPTCHA_PATTERN, self.data)
            m2 = re.search(self.CIRCLE_CAPTCHA_PATTERN, self.data)
            m3 = re.search(self.SOLVE_MEDIA_PATTERN, self.data)
            m4 = re.search(self.KEY_CAPTCHA_PATTERN, self.data)
            if m1: #: Normal captcha
                self.log_debug("Internal Captcha URL: %s" % urlparse.urljoin(self.pyfile.url, m1.group(1)))
                captcha_code = self.captcha.decrypt(urlparse.urljoin(self.pyfile.url, m1.group(1)),
                                                    ref=True, input_type="gif")
                self.site_with_links = self.load(self.pyfile.url,
                                                 post={'recaptcha_response_field': captcha_code})
            elif m2: #: Circle captcha
                self.log_debug("Circle Captcha URL: %s" % urlparse.urljoin(self.pyfile.url, m2.group(1)))
                # 'positional' output: the solver returns click coordinates.
                captcha_code = self.captcha.decrypt(urlparse.urljoin(self.pyfile.url, m2.group(1)),
                                                    input_type="png", output_type='positional')
                self.site_with_links = self.load(self.pyfile.url,
                                                 post={'button.x': captcha_code[0],
                                                       'button.y': captcha_code[1]})
            elif m3: #: Solvemedia captcha
                self.log_debug("Solvemedia Captcha URL: %s" % urlparse.urljoin(self.pyfile.url, m3.group(1)))
                solvemedia = SolveMedia(self.pyfile)
                captcha_key = solvemedia.detect_key()
                if captcha_key:
                    self.captcha = solvemedia
                    response, challenge = solvemedia.challenge(captcha_key)
                    self.site_with_links = self.load(self.pyfile.url,
                                                     post={'adcopy_response' : response,
                                                           'adcopy_challenge' : challenge})
            elif m4: #: Keycaptcha captcha
                self.log_debug("Keycaptcha Captcha URL: %s unsupported, retrying" % m4.group(1))
                self.retry()
            else:
                # No known pattern matched: fall back to ReCaptcha.
                recaptcha = ReCaptcha(self.pyfile)
                captcha_key = recaptcha.detect_key()
                if captcha_key:
                    self.captcha = recaptcha
                    try:
                        response, challenge = recaptcha.challenge(captcha_key)
                    except Exception:
                        self.retry_captcha()
                    self.site_with_links = self.load(self.pyfile.url,
                                                     post={'g-recaptcha-response': response})
                else:
                    self.log_info(_("Unknown captcha found, retrying"))
                    self.retry()
            # Prompt still present -> the answer was wrong.
            if re.search(self.CAPTCHA_PATTERN, self.site_with_links):
                self.retry_captcha()
        else:
            self.log_info(_("No captcha found"))
            self.site_with_links = self.data
    def handle_dlc_container(self):
        """Collect DLC container URLs referenced by the page."""
        dlcs = re.findall(self.DLC_LINK_PATTERN, self.site_with_links)
        if not dlcs:
            return
        for _dlc in dlcs:
            self.urls.append(urlparse.urljoin(self.pyfile.url, "/DLC/%s.dlc" % _dlc))
    def handle_weblinks(self):
        """Resolve each /Link/<id>.html redirector to its target URL."""
        try:
            links = re.findall(self.WEBLINK_PATTERN, self.site_with_links)
            for _link in links:
                res = self.load(urlparse.urljoin(self.pyfile.url, "/Link/%s.html" % _link))
                link2 = re.search('<iframe noresize src="(.*)"></iframe>', res)
                if link2:
                    # The final URL lives in the redirect Location header.
                    res2 = self.load(link2.group(1), just_header=True)
                    self.urls.append(res2['location'])
        except Exception, e:
            self.log_debug("Error decrypting weblinks: %s" % e)
    def handle_CNL(self):
        """Extract Click'n'Load key/payload pairs and decrypt them."""
        try:
            vjk = re.findall('<input type="hidden" name="jk" value="function f\(\){ return \'(.*)\';}">', self.site_with_links)
            vcrypted = re.findall('<input type="hidden" name="crypted" value="(.*)">', self.site_with_links)
            for i in xrange(len(vcrypted)):
                self.urls.extend(self._get_links(vcrypted[i], vjk[i]))
        except Exception, e:
            self.log_debug("Error decrypting CNL: %s" % e)
    def _get_links(self, crypted, jk):
        """Decrypt one CNL payload.

        :param crypted: base64-encoded AES-CBC ciphertext.
        :param jk: hex-encoded key (CNL uses the key as IV as well).
        :return: list of plaintext link lines.
        """
        #: Get key
        key = binascii.unhexlify(str(jk))
        #: Decrypt
        Key = key
        IV = key
        obj = Crypto.Cipher.AES.new(Key, Crypto.Cipher.AES.MODE_CBC, IV)
        text = obj.decrypt(crypted.decode('base64'))
        #: Extract links
        text = text.replace("\x00", "").replace("\r", "")
        links = filter(bool, text.split('\n'))
        return links
| gpl-3.0 |
espressomd/espresso | maintainer/benchmarks/lb.py | 1 | 6553 | #
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Benchmark Lattice-Boltzmann fluid + Lennard-Jones particles
"""
import os
import sys
import numpy as np
from time import time
import argparse
# Benchmark driver: set up an LJ fluid coupled to an LB fluid, time the
# integration loop, and append one CSV row of timing statistics.
parser = argparse.ArgumentParser(description="Benchmark LB simulations. "
                                 "Save the results to a CSV file.")
parser.add_argument("--particles_per_core", metavar="N", action="store",
                    type=int, default=125, required=False,
                    help="Number of particles in the simulation box")
parser.add_argument("--lb_sites_per_particle", metavar="N_LB", action="store",
                    type=float, default=28, required=False,
                    # BUG FIX: help text was a copy-paste of the
                    # --particles_per_core description.
                    help="Number of LB lattice sites per particle")
parser.add_argument("--volume_fraction", metavar="FRAC", action="store",
                    type=float, default=0.03, required=False,
                    # BUG FIX: help text claimed "default: 0.50" while the
                    # actual default is 0.03.
                    help="Fraction of the simulation box volume occupied by "
                         "particles (range: [0.01-0.74], default: 0.03)")
group = parser.add_mutually_exclusive_group()
group.add_argument("--output", metavar="FILEPATH", action="store",
                   type=str, required=False, default="benchmarks.csv",
                   help="Output file (default: benchmarks.csv)")
args = parser.parse_args()
# process and check arguments
n_iterations = 30
assert args.volume_fraction > 0, "volume_fraction must be a positive number"
assert args.volume_fraction < np.pi / (3 * np.sqrt(2)), \
    "volume_fraction exceeds the physical limit of sphere packing (~0.74)"
import espressomd
required_features = ["LENNARD_JONES"]
espressomd.assert_features(required_features)
# System
#############################################################
system = espressomd.System(box_l=[1, 1, 1])
# Interaction parameters (Lennard-Jones)
#############################################################
lj_eps = 1.0  # LJ epsilon
lj_sig = 1.0  # particle diameter
lj_cut = lj_sig * 2**(1. / 6.)  # cutoff distance
# System parameters
#############################################################
n_proc = system.cell_system.get_state()['n_nodes']
n_part = n_proc * args.particles_per_core
# volume of N spheres with radius r: N * (4/3*pi*r^3)
box_l = (n_part * 4. / 3. * np.pi * (lj_sig / 2.)**3
         / args.volume_fraction)**(1. / 3.)
lb_grid = int((round(n_part * args.lb_sites_per_particle)**(1. / 3)))
agrid = box_l / lb_grid
# Fewer steps per timing sample on large grids, but at least 50.
measurement_steps = int(max(120**3 / lb_grid**3, 50))
# System
#############################################################
system.box_l = 3 * (box_l,)
# PRNG seeds
#############################################################
# np.random.seed(1)
# Integration parameters
#############################################################
system.time_step = 0.01
system.cell_system.skin = 0.5
system.thermostat.turn_off()
#############################################################
#  Setup System                                             #
#############################################################
# Interaction setup
#############################################################
system.non_bonded_inter[0, 0].lennard_jones.set_params(
    epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto")
# Particle setup
#############################################################
# Warmup Integration                                        #
#############################################################
system.integrator.set_steepest_descent(
    f_max=0,
    gamma=0.001,
    max_displacement=0.01)
# warmup: relax overlaps until the energy is small per particle
while system.analysis.energy()["total"] > 0.1 * n_part:
    print("minimization: {:.1f}".format(system.analysis.energy()["total"]))
    system.integrator.run(20)
print("minimization: {:.1f}".format(system.analysis.energy()["total"]))
print()
system.integrator.set_vv()
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
# tuning and equilibration
print("Tune skin: {}".format(system.cell_system.tune_skin(
    min_skin=0.2, max_skin=1, tol=0.05, int_steps=100)))
system.integrator.run(500)
print("Tune skin: {}".format(system.cell_system.tune_skin(
    min_skin=0.2, max_skin=1, tol=0.05, int_steps=100)))
system.integrator.run(500)
system.thermostat.turn_off()
print("lb sites", lb_grid, "agrid", agrid)
# Support both the legacy and the walberla-based LB implementations.
if "LBFluid" in dir(espressomd.lb):
    LBClass = espressomd.lb.LBFluid
elif "LBFluidWalberla" in dir(espressomd.lb):
    LBClass = espressomd.lb.LBFluidWalberla
else:
    raise Exception("LB not built in")
lbf = LBClass(agrid=agrid, dens=1, visc=1, tau=system.time_step, kT=1, seed=1)
system.actors.add(lbf)
print("lb shape", lbf.shape)
system.thermostat.set_lb(gamma=10, LB_fluid=lbf, seed=2)
# time integration loop
print("Timing every {} steps".format(measurement_steps))
main_tick = time()
all_t = []
for i in range(n_iterations):
    tick = time()
    system.integrator.run(measurement_steps)
    tock = time()
    t = (tock - tick) / measurement_steps
    print("step {}, time = {:.2e}, verlet: {:.2f}, energy: {:.2e}"
          .format(i, t, system.cell_system.get_state()["verlet_reuse"],
                  system.analysis.energy()["total"]))
    all_t.append(t)
main_tock = time()
# average time per integration step with a 95% confidence interval
all_t = np.array(all_t)
avg = np.average(all_t)
ci = 1.96 * np.std(all_t) / np.sqrt(len(all_t) - 1)
print("average: {:.3e} +/- {:.3e} (95% C.I.)".format(avg, ci))
# Append one CSV row; write the header first if the file is new.
cmd = " ".join(x for x in sys.argv[1:] if not x.startswith("--output"))
report = ('"{script}","{arguments}",{cores},{mean:.3e},'
          '{ci:.3e},{n},{dur:.1f}\n'.format(
              script=os.path.basename(sys.argv[0]), arguments=cmd,
              cores=n_proc, dur=main_tock - main_tick, n=measurement_steps,
              mean=avg, ci=ci))
if not os.path.isfile(args.output):
    report = ('"script","arguments","cores","mean","ci",'
              '"nsteps","duration"\n' + report)
with open(args.output, "a") as f:
    f.write(report)
| gpl-3.0 |
EduPepperPDTesting/pepper2013-testing | lms/djangoapps/sso/cache.py | 42 | 3438 | # Copyright (C) 2011-2012 Yaco Sistemas (http://www.yaco.es)
# Copyright (C) 2010 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from saml2.cache import Cache
from saml2.ident import code, decode
class DjangoSessionCacheAdapter(dict):
    """Dict facade over a single key inside the Django session.

    The adapter seeds itself from ``session[key_prefix + key_suffix]``
    (an empty mapping when the key is absent) and writes its current
    contents back into the session on :meth:`sync`.
    """
    # Namespace prefix so saml2 state cannot collide with other apps.
    key_prefix = '_saml2'
    def __init__(self, django_session, key_suffix):
        self.session = django_session
        self.key = self.key_prefix + key_suffix
        super(DjangoSessionCacheAdapter, self).__init__(self._get_objects())
    def _get_objects(self):
        # A missing key simply means "nothing cached yet".
        return self.session.get(self.key, {})
    def _set_objects(self, objects):
        self.session[self.key] = objects
    def sync(self):
        # Store a plain-dict snapshot so the session serializes cleanly
        # and the session backend notices the assignment.
        self._set_objects(dict(self))
class OutstandingQueriesCache(object):
    """Tracks authentication queries sent to the IdP that still await a reply.

    Each entry maps a SAML2 session id to the URL the user came from, stored
    in the Django session via a DjangoSessionCacheAdapter.
    """

    def __init__(self, django_session):
        self._db = DjangoSessionCacheAdapter(django_session,
                                             '_outstanding_queries')

    def outstanding_queries(self):
        """Return the mapping of pending SAML2 session ids to origin URLs."""
        return self._db._get_objects()

    def set(self, saml2_session_id, came_from):
        """Record a new outstanding query and persist it to the session."""
        self._db[saml2_session_id] = came_from
        self._db.sync()

    def delete(self, saml2_session_id):
        """Forget a query once the IdP has replied; no-op for unknown ids."""
        if saml2_session_id in self._db:
            del self._db[saml2_session_id]
            self._db.sync()
class IdentityCache(Cache):
    """Handles information about the users that have been successfully
    logged in.

    This information is useful because when the user logs out we must
    know where does he come from in order to notify such IdP/AA.

    The current implementation stores this information in the Django session.
    """
    def __init__(self, django_session):
        # Identities live in the session under the '_saml2_identities' key.
        self._db = DjangoSessionCacheAdapter(django_session, '_identities')
        self._sync = True

    def get(self, name_id, entity_id, *args, **kwargs):
        # Stored entries keep name_id in its coded (serializable) form; hand
        # the caller a decoded copy without mutating the stored dict.
        info = super(IdentityCache, self).get(name_id, entity_id, *args, **kwargs)
        try:
            name_id = info['name_id']
        except KeyError:
            # Entry has no name_id field: return it untouched.
            pass
        else:
            info = dict(info)
            info['name_id'] = decode(name_id)
        return info

    def set(self, name_id, entity_id, info, *args, **kwargs):
        # NOTE(review): when ``info`` carries its own 'name_id', the local
        # ``name_id`` parameter is rebound to it below, so that value is what
        # gets passed down to Cache.set — looks deliberate, confirm intended.
        try:
            name_id = info['name_id']
        except KeyError:
            pass
        else:
            # Copy before coding so the caller's dict is not modified.
            info = dict(info)
            info['name_id'] = code(name_id)
        return super(IdentityCache, self).set(name_id, entity_id, info, *args, **kwargs)
class StateCache(DjangoSessionCacheAdapter):
    """Store state information that is needed to associate a logout
    request with its response.
    """
    def __init__(self, django_session):
        # Session key ends up as '_saml2_state'.
        super(StateCache, self).__init__(django_session, '_state')
| agpl-3.0 |
alhashash/odoo | openerp/osv/query.py | 8 | 7446 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP S.A. http://www.openerp.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
def _quote(to_quote):
if '"' not in to_quote:
return '"%s"' % to_quote
return to_quote
class Query(object):
    """
    Dumb implementation of a Query object, using 3 string lists so far
    for backwards compatibility with the (table, where_clause, where_params) previously used.
    TODO: To be improved after v6.0 to rewrite part of the ORM and add support for:
     - auto-generated multiple table aliases
     - multiple joins to the same table with different conditions
     - dynamic right-hand-side values in domains (e.g. a.name = a.description)
     - etc.
    """

    def __init__(self, tables=None, where_clause=None, where_clause_params=None, joins=None):
        # holds the list of tables joined using default JOIN.
        # the table names are stored double-quoted (backwards compatibility)
        self.tables = tables or []

        # holds the list of WHERE clause elements, to be joined with
        # 'AND' when generating the final query
        self.where_clause = where_clause or []

        # holds the parameters for the formatting of `where_clause`, to be
        # passed to psycopg's execute method.
        self.where_clause_params = where_clause_params or []

        # holds table joins done explicitly, supporting outer joins. The JOIN
        # condition should not be in `where_clause`. The dict is used as follows:
        #   self.joins = {
        #       'table_a': [
        #           ('table_b', 'table_a_col1', 'table_b_col', 'LEFT JOIN'),
        #           ('table_c', 'table_a_col2', 'table_c_col', 'LEFT JOIN'),
        #           ('table_d', 'table_a_col3', 'table_d_col', 'JOIN'),
        #       ]
        #   }
        # which should lead to the following SQL:
        #   SELECT ... FROM "table_a"
        #       LEFT JOIN "table_b" ON ("table_a"."table_a_col1" = "table_b"."table_b_col")
        #       LEFT JOIN "table_c" ON ("table_a"."table_a_col2" = "table_c"."table_c_col")
        self.joins = joins or {}

    def _get_table_aliases(self):
        # Second element of get_alias_from_query's result for every entry in
        # self.tables (used below to validate explicit-join left-hand sides).
        from openerp.osv.expression import get_alias_from_query
        return [get_alias_from_query(from_statement)[1] for from_statement in self.tables]

    def _get_alias_mapping(self):
        # Maps an alias to its full FROM statement.
        # NOTE(review): the unpacked local names look swapped — ``statement``
        # here receives the *second* element of get_alias_from_query's result,
        # the same element _get_table_aliases treats as the alias. The mapping
        # works, but the naming is misleading; confirm before renaming.
        from openerp.osv.expression import get_alias_from_query
        mapping = {}
        for table in self.tables:
            alias, statement = get_alias_from_query(table)
            mapping[statement] = table
        return mapping

    def add_join(self, connection, implicit=True, outer=False):
        """ Join a destination table to the current table.

            :param implicit: False if the join is an explicit join. This allows
                to fall back on the previous implementation of ``join`` before
                OpenERP 7.0. It therefore adds the JOIN specified in ``connection``
                If True, the join is done implicitely, by adding the table alias
                in the from clause and the join condition in the where clause
                of the query. Implicit joins do not handle outer parameter.
            :param connection: a tuple ``(lhs, table, lhs_col, col, link)``.
                The join corresponds to the SQL equivalent of::

                    (lhs.lhs_col = table.col)

                Note that all connection elements are strings. Please refer to
                expression.py for more details about joins.
            :param outer: True if a LEFT OUTER JOIN should be used, if possible
                (no promotion to OUTER JOIN is supported in case the JOIN
                was already present in the query, as for the moment
                implicit INNER JOINs are only connected from NON-NULL
                columns so it would not be correct (e.g. for
                ``_inherits`` or when a domain criterion explicitly
                adds filtering)
            :return: the ``(alias, alias_statement)`` pair for the joined table.
        """
        from openerp.osv.expression import generate_table_alias
        (lhs, table, lhs_col, col, link) = connection
        alias, alias_statement = generate_table_alias(lhs, [(table, link)])

        if implicit:
            if alias_statement not in self.tables:
                # Implicit join: table goes in the FROM list, the join
                # condition becomes an ordinary WHERE clause element.
                self.tables.append(alias_statement)
                condition = '("%s"."%s" = "%s"."%s")' % (lhs, lhs_col, alias, col)
                self.where_clause.append(condition)
            else:
                # already joined
                pass
            return alias, alias_statement
        else:
            aliases = self._get_table_aliases()
            assert lhs in aliases, "Left-hand-side table %s must already be part of the query tables %s!" % (lhs, str(self.tables))
            if alias_statement in self.tables:
                # already joined, must ignore (promotion to outer and multiple joins not supported yet)
                pass
            else:
                # add JOIN
                self.tables.append(alias_statement)
                self.joins.setdefault(lhs, []).append((alias, lhs_col, col, outer and 'LEFT JOIN' or 'JOIN'))
            return alias, alias_statement

    def get_sql(self):
        """ Returns (query_from, query_where, query_params). """
        from openerp.osv.expression import get_alias_from_query
        query_from = ''
        tables_to_process = list(self.tables)
        alias_mapping = self._get_alias_mapping()

        def add_joins_for_table(table, query_from):
            # Recursively appends the explicit JOIN clauses hanging off
            # ``table``.
            # NOTE(review): this removes entries from ``tables_to_process``
            # while the outer loop below iterates over it; it appears to rely
            # on joined tables always appearing *after* their left-hand side
            # in self.tables — confirm before restructuring.
            for (dest_table, lhs_col, col, join) in self.joins.get(table, []):
                tables_to_process.remove(alias_mapping[dest_table])
                query_from += ' %s %s ON ("%s"."%s" = "%s"."%s")' % \
                    (join, alias_mapping[dest_table], table, lhs_col, dest_table, col)
                query_from = add_joins_for_table(dest_table, query_from)
            return query_from

        for table in tables_to_process:
            query_from += table
            table_alias = get_alias_from_query(table)[1]
            if table_alias in self.joins:
                query_from = add_joins_for_table(table_alias, query_from)
            query_from += ','
        query_from = query_from[:-1]  # drop last comma
        return query_from, " AND ".join(self.where_clause), self.where_clause_params

    def __str__(self):
        return '<osv.Query: "SELECT ... FROM %s WHERE %s" with params: %r>' % self.get_sql()
| agpl-3.0 |
frontg8/frontg8lib | doc/ext/breathe/breathe/renderer/filter.py | 1 | 37770 | """
Filters
-------
Filters are an interesting and somewhat challenging part of the code base. They are used for
two different purposes:
- To figure out which nodes in the xml hierarchy to start rendering from. These are called
'finder filters' or 'content filters'. This is done before rendering starts.
- To figure out which nodes under a selected nodes in the xml hierarchy should be rendered. These
are called 'render filters'. This is done during the render process with a test in the
DoxygenToRstRendererFactory.
General Implementation
~~~~~~~~~~~~~~~~~~~~~~
Filters are essentially just tests to see if a node matches certain parameters that are needed to
decide whether or not to include it in some output.
As these filters are declared once and then used on multiple nodes, we model them as object
hierarchies that encapsulate the required test and take a node (with its context) and return True or
False.
If you wanted a test which figures out if a node has the node_type 'memberdef' you might create the
following object hierarchy:
node_is_memberdef = InFilter(AttributeAccessor(Node(), 'node_type'), ['memberdef'])
This reads from the inside out, as get the node, then get the node_type attribute from it, and see
if the value of the attribute is in the list ['memberdef'].
The Node() is called a 'Selector'. Parent() is also a selector. It means given the current context,
work with the parent of the current node rather than the node itself. This allows you to frame tests
in terms of a node's parent as well as the node which helps when we want nodes with particular
parents and not others.
The AttributeAccessor() is called an 'Accessor'. It wraps up an attempt to access a particular
attribute on the selected node. There are quite a few different specific accessors but they can
mostly be generalised with the AttributeAccessor. This code has evolved over time and initially the
implementation involved specific accessor classes (which are still used in large parts of it.)
The InFilter() is unsurprisingly called a 'Filter'. There are lots of different filters. Filters
either act on the results of Accessors or on the results of other Filters and they always return
True or False. The AndFilter and the OrFilter can be used to combine the outputs of other Filters
with logical 'and' and 'or' operations.
You can build up some pretty complex expressions with this level of freedom as you
might imagine. The complexity is unfortunate but necessary as the nature of filtering the xml is
quite complex.
Finder Filters
~~~~~~~~~~~~~~
The implementation of the filters can change a little depending on how they are called. Finder
filters are called from the breathe.finder.doxygen.index and breathe.finder.doxygen.compound files.
They are called like this:
# Descend down the hierarchy
# ...
if filter_.allow(node_stack):
matches.append(self.data_object)
# Keep on descending
# ...
This means that the result of the filter does not stop us descending down the hierarchy and testing
more nodes. This simplifies the filters as they only have to return true for the exact nodes they
are interested in and they don't have to worry about allowing the iteration down the hierarchy to
continue for nodes which don't match.
An example of a finder filter is:
AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compound"]),
InFilter(KindAccessor(Node()), ["group"]),
InFilter(NameAccessor(Node()), ["mygroup"])
)
This says, return True for all the nodes of node_type 'compound' with 'kind' set to 'group' which
have the name 'mygroup'. It returns false for everything else, but when a node matching this is
found then it is added to the matches list by the code above.
It is therefore relatively easy to write finder filters. If you have two separate node filters like
the one above and you want to match on both of them then you can do:
OrFilter(
node_filter_1,
node_filter_2
)
To combine them.
Content Filters
~~~~~~~~~~~~~~~
Content filters are harder than the finder filters as they are responsible for halting the iteration
down the hierarchy if they return false. This means that if you're interested in memberdef nodes
with a particular attribute then you have to check for that but also include a clause which allows
all other non-memberdef nodes to pass through as you don't want to interrupt them.
This means you end up with filters like this:
OrFilter(
AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compound"]),
InFilter(KindAccessor(Node()), ["group"]),
InFilter(NameAccessor(Node()), ["mygroup"])
),
NotFilter(
AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compound"]),
InFilter(KindAccessor(Node()), ["group"]),
)
)
)
Which is to say that we want to let through a compound, with kind group, with name 'mygroup' but
we're also happy if the node is **not** a compound with kind group. Really we just don't want to let
through any compounds with kind group with name other than 'mygroup'. As such, we can rephrase this
as:
NotFilter(
AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compound"]),
InFilter(KindAccessor(Node()), ["group"]),
NotFilter(InFilter(NameAccessor(Node()), ["mygroup"]))
)
)
Using logical manipulation we can rewrite this as:
OrFilter(
NotFilter(InFilter(NodeTypeAccessor(Node()), ["compound"])),
NotFilter(InFilter(KindAccessor(Node()), ["group"])),
InFilter(NameAccessor(Node()), ["mygroup"])
)
Which reads: allow if it isn't a compound, or if it is a compound but doesn't have a 'kind' of
'group', but if it is a compound and has a 'kind' of 'group' then only allow it if it is named
'mygroup'.
Helper Syntax
~~~~~~~~~~~~~
Some of these filter declarations get a little awkward to read and write. They are not laid out in
manner which reads smoothly. Additional helper methods and operator overloads have been introduced
to help with this.
AttributeAccessor objects are created in property methods on the Selector classes so:
node.kind
Where node has been declared as a Node() instance. Results in:
AttributeAccessor(Node(), 'kind')
The '==' and '!=' operators on the Accessors have been overloaded to return the appropriate filters
so that:
node.kind == 'group'
Results in:
    InFilter(AttributeAccessor(Node(), 'kind'), ['group'])
We also override the binary 'and' (&), 'or' (|) and 'not' (~) operators in Python to apply
AndFilters, OrFilters and NotFilters respectively. We have to override the binary operators as they
actual 'and', 'or' and 'not' operators cannot be overridden. So:
(node.node_type == 'compound') & (node.name == 'mygroup')
Translates to:
AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compound"])),
InFilter(NameAccessor(Node()), ["mygroup"])
)
Where the former is hopefully more readable without sacrificing too much to the abstract magic of
operator overloads.
Operator Precedences & Extra Parenthesis
''''''''''''''''''''''''''''''''''''''''
As the binary operators have a lower operator precedence than '==' and '!=' and some other operators
we have to include additional parenthesis in the expressions to group them as we want. So instead of
writing:
node.node_type == 'compound' & node.name == 'mygroup'
We have to write:
(node.node_type == 'compound') & (node.name == 'mygroup')
"""
import six
class UnrecognisedKindError(Exception):
    """Raised when a directive asks for a kind the filter factory does not
    handle (see FilterFactory.create_render_filter)."""
    pass
class Selector(object):
    """Base class for node selectors.

    Each property wraps this selector in an Accessor for one attribute of the
    selected node, which is what enables the filter DSL
    (e.g. ``node.kind == 'group'``).
    """
    @property
    def node_type(self):
        return NodeTypeAccessor(self)

    @property
    def kind(self):
        return AttributeAccessor(self, 'kind')

    @property
    def node_name(self):
        return AttributeAccessor(self, 'node_name')

    @property
    def name(self):
        return AttributeAccessor(self, 'name')

    @property
    def briefdescription(self):
        return AttributeAccessor(self, 'briefdescription')

    @property
    def detaileddescription(self):
        return AttributeAccessor(self, 'detaileddescription')

    @property
    def prot(self):
        # Doxygen's protection level: 'public', 'protected' or 'private'.
        return AttributeAccessor(self, 'prot')

    @property
    def valueOf(self):
        # Underlying xml node value (generated bindings call it 'valueOf_').
        return AttributeAccessor(self, 'valueOf_')

    @property
    def id(self):
        return AttributeAccessor(self, 'id')
class Ancestor(Selector):
    """Selects the node ``generations`` levels up the node stack."""
    def __init__(self, generations):
        self.generations = generations

    def __call__(self, node_stack):
        return node_stack[self.generations]
class Parent(Selector):
    """Selects the direct parent of the current node (stack index 1)."""
    def __call__(self, node_stack):
        return node_stack[1]
class Node(Selector):
    """Selects the current node itself (top of the node stack)."""
    def __call__(self, node_stack):
        return node_stack[0]
class Accessor(object):
    """Base class for accessors: reads one value from the node chosen by
    ``selector``. Comparison operators are overloaded to build filters, so
    ``accessor == value`` yields an InFilter rather than a bool.
    """
    def __init__(self, selector):
        self.selector = selector

    def __eq__(self, value):
        return InFilter(self, [value])

    def __ne__(self, value):
        return NotFilter(InFilter(self, [value]))

    def is_one_of(self, collection):
        return InFilter(self, collection)

    def has_content(self):
        return HasContentFilter(self)

    def endswith(self, options):
        return EndsWithFilter(self, options)
class NameAccessor(Accessor):
    """Returns the ``name`` attribute of the selected node."""
    def __call__(self, node_stack):
        return self.selector(node_stack).name
class NodeNameAccessor(Accessor):
    """Check the .node_name member which is declared on refTypeSub nodes

    It distinguishes between innerclass, innernamespace, etc.
    """
    def __call__(self, node_stack):
        return self.selector(node_stack).node_name
class NodeTypeAccessor(Accessor):
    """Returns the ``node_type`` of the selected node.

    Plain text objects that leak into the node tree have no ``node_type``
    attribute, so they are reported under the pseudo-type "unicode".
    """
    def __call__(self, node_stack):
        data_object = self.selector(node_stack)
        try:
            return data_object.node_type
        except AttributeError as e:
            # Horrible hack to silence errors on filtering unicode objects
            # until we fix the parsing.
            # isinstance (rather than an exact type() comparison) also covers
            # subclasses of the text type, which the old check missed.
            if isinstance(data_object, six.text_type):
                return "unicode"
            else:
                raise e
class KindAccessor(Accessor):
    """Returns the doxygen ``kind`` (class, struct, group, ...) of the node."""
    def __call__(self, node_stack):
        return self.selector(node_stack).kind
class AttributeAccessor(Accessor):
    """Returns the value of a particular attribute on the selected node.

    AttributeAccessor(Node(), 'name') returns the value of ``node.name``.
    """
    def __init__(self, selector, attribute_name):
        Accessor.__init__(self, selector)
        self.attribute_name = attribute_name

    def __call__(self, node_stack):
        return getattr(self.selector(node_stack), self.attribute_name)
class LambdaAccessor(Accessor):
    """Applies an arbitrary function ``func`` to the selected node."""
    def __init__(self, selector, func):
        Accessor.__init__(self, selector)
        self.func = func

    def __call__(self, node_stack):
        return self.func(self.selector(node_stack))
class NamespaceAccessor(Accessor):
    """Returns the ``namespaces`` collection of the selected node."""
    def __call__(self, node_stack):
        return self.selector(node_stack).namespaces
class Filter(object):
    """Base filter; overloads &, | and ~ so filters compose like booleans
    (AndFilter, OrFilter and NotFilter respectively)."""
    def __and__(self, other):
        return AndFilter(self, other)

    def __or__(self, other):
        return OrFilter(self, other)

    def __invert__(self):
        return NotFilter(self)
class HasAncestorFilter(Filter):
    """Passes when the node stack is deeper than ``generations`` levels,
    i.e. the node has an ancestor that far up."""
    def __init__(self, generations):
        self.generations = generations

    def allow(self, node_stack):
        return len(node_stack) > self.generations
class HasContentFilter(Filter):
    """Passes when the accessed node has a non-empty ``content_`` list."""
    def __init__(self, accessor):
        self.accessor = accessor

    def allow(self, node_stack):
        """Detects if the node in question has an empty .content_ property.
        """
        return bool(self.accessor(node_stack).content_)
class EndsWithFilter(Filter):
    """Detects if the string result of the accessor ends with any of the strings
    in the ``options`` iterable parameter.
    """
    def __init__(self, accessor, options):
        self.accessor = accessor
        self.options = options

    def allow(self, node_stack):
        # str.endswith accepts a tuple of candidate suffixes natively, which
        # replaces the explicit loop with a single C-level call.
        return self.accessor(node_stack).endswith(tuple(self.options))
class InFilter(Filter):
    """Checks if what is returned from the accessor is 'in' in the members"""
    def __init__(self, accessor, members):
        self.accessor = accessor
        self.members = members

    def allow(self, node_stack):
        # Single membership test against the configured collection.
        return self.accessor(node_stack) in self.members
class GlobFilter(Filter):
    """Passes when the accessor's text matches the stored Glob pattern."""
    def __init__(self, accessor, glob):
        self.accessor = accessor
        self.glob = glob

    def allow(self, node_stack):
        text = self.accessor(node_stack)
        return self.glob.match(text)
class FilePathFilter(Filter):
    """Passes when the accessed node's file location refers to ``target_file``."""
    def __init__(self, accessor, target_file, path_handler):
        self.accessor = accessor
        self.target_file = target_file
        self.path_handler = path_handler

    def allow(self, node_stack):
        location = self.accessor(node_stack).file

        if self.path_handler.includes_directory(self.target_file):
            # If the target_file contains directory separators then
            # match against the same length at the end of the location
            #
            location_match = location[-len(self.target_file):]
            return location_match == self.target_file
        else:
            # If there are no separators, match against the whole filename
            # at the end of the location
            #
            # This is to prevent "Util.cpp" matching "PathUtil.cpp"
            #
            location_basename = self.path_handler.basename(location)
            return location_basename == self.target_file
class NamespaceFilter(Filter):
    """Passes when the namespace portion of the node's qualified name is one
    of the namespaces reported by ``namespace_accessor``."""
    def __init__(self, namespace_accessor, name_accessor):
        self.namespace_accessor = namespace_accessor
        self.name_accessor = name_accessor

    def allow(self, node_stack):
        namespaces = self.namespace_accessor(node_stack)
        name = self.name_accessor(node_stack)
        # rpartition yields ('', '', name) when '::' is absent, exactly
        # matching the old try/except ValueError around rsplit('::', 1).
        namespace, _, _ = name.rpartition("::")
        return namespace in namespaces
class OpenFilter(Filter):
    """Unconditionally passes every node."""
    def allow(self, node_stack):
        return True
class ClosedFilter(Filter):
    """Unconditionally rejects every node."""
    def allow(self, node_stack):
        return False
class NotFilter(Filter):
    """Logical negation of ``child_filter``."""
    def __init__(self, child_filter):
        self.child_filter = child_filter

    def allow(self, node_stack):
        return not self.child_filter.allow(node_stack)
class AndFilter(Filter):
    """Logical 'and' of the child filters."""
    def __init__(self, *filters):
        self.filters = filters

    def allow(self, node_stack):
        # all() short-circuits on the first failing filter, exactly like the
        # original explicit loop did.
        return all(filter_.allow(node_stack) for filter_ in self.filters)
class OrFilter(Filter):
    """Provides a short-cutted 'or' operation between two filters"""
    def __init__(self, *filters):
        self.filters = filters

    def allow(self, node_stack):
        # any() short-circuits on the first passing filter, exactly like the
        # original explicit loop did.
        return any(filter_.allow(node_stack) for filter_ in self.filters)
class IfFilter(Filter):
    """Ternary filter: delegates to ``if_true`` or ``if_false`` depending on
    whether ``condition`` passes for the node stack."""
    def __init__(self, condition, if_true, if_false):
        self.condition = condition
        self.if_true = if_true
        self.if_false = if_false

    def allow(self, node_stack):
        branch = self.if_true if self.condition.allow(node_stack) else self.if_false
        return branch.allow(node_stack)
class Glob(object):
    """Pairs a matching function with a pattern; ``match`` applies the
    function to ``(name, pattern)``."""
    def __init__(self, method, pattern):
        self.method = method
        self.pattern = pattern

    def match(self, name):
        return self.method(name, self.pattern)
class Gather(object):
    """Pseudo-filter used for its side effect: records the accessor's results
    into the shared ``names`` list. Always returns False so that, wrapped in
    a NotFilter, it does not affect the outcome (see create_file_filter)."""
    def __init__(self, accessor, names):
        self.accessor = accessor
        self.names = names

    def allow(self, node_stack):
        self.names.extend(self.accessor(node_stack))
        return False
class FilterFactory(object):
    """Builds the composite content/render filters used by the directives."""

    # C++ style public entries
    public_kinds = set([
        "public-type",
        "public-func",
        "public-attrib",
        "public-slot",
        "public-static-func",
        "public-static-attrib",
        ])

    def __init__(self, path_handler):
        self.path_handler = path_handler
        # NOTE(review): both start empty here; presumably assigned by the
        # consumer after construction — confirm against the callers.
        self.default_members = ()
        self.implementation_filename_extensions = ()
    def create_render_filter(self, kind, options):
        """Render filter for group & namespace blocks

        :raises UnrecognisedKindError: for any kind other than 'group' or
            'namespace'.
        """
        if kind not in ['group', 'namespace']:
            raise UnrecognisedKindError(kind)

        # Generate new dictionary from defaults
        filter_options = dict((entry, u'') for entry in self.default_members)

        # Update from the actual options
        filter_options.update(options)

        # Convert the doxygengroup members flag (which just stores None as the value) to an empty
        # string to allow the create_class_member_filter to process it properly
        if 'members' in filter_options:
            filter_options['members'] = u''

        node = Node()
        grandparent = Ancestor(2)
        has_grandparent = HasAncestorFilter(2)

        # memberdefs whose grandparent compounddef is not a class/struct
        # (i.e. free functions/variables in groups and namespaces)
        non_class_memberdef = \
            has_grandparent \
            & (grandparent.node_type == 'compounddef') \
            & (grandparent.kind != 'class') \
            & (grandparent.kind != 'struct') \
            & (node.node_type == 'memberdef')

        return (self.create_class_member_filter(filter_options) | non_class_memberdef) \
            & self.create_innerclass_filter(filter_options) \
            & self.create_outline_filter(filter_options)
    def create_class_filter(self, target, options):
        """Content filter for classes based on various directive options"""

        # Generate new dictionary from defaults
        filter_options = dict((entry, u'') for entry in self.default_members)

        # Update from the actual options
        filter_options.update(options)

        return AndFilter(
            self.create_class_member_filter(filter_options),
            self.create_innerclass_filter(filter_options, outerclass=target),
            self.create_outline_filter(filter_options),
            self.create_show_filter(filter_options),
            )
def create_innerclass_filter(self, options, outerclass=''):
"""
:param outerclass: Should be the class/struct being target by the directive calling this
code. If it is a group or namespace directive then it should be left
blank. It is used when looking for names listed in the :members: option.
The name should include any additional namespaces that the target class
is in.
"""
node = Node()
node_is_innerclass = (node.node_type == "ref") & (node.node_name == "innerclass")
parent = Parent()
parent_is_compounddef = parent.node_type == 'compounddef'
parent_is_class = parent.kind.is_one_of(['class', 'struct'])
allowed = set()
all_options = {
'protected-members': 'protected',
'private-members': 'private',
}
for option, scope in all_options.iteritems():
if option in options:
allowed.add(scope)
node_is_innerclass_in_class = parent_is_compounddef & parent_is_class & node_is_innerclass
public_innerclass_filter = ClosedFilter()
if 'members' in options:
if options['members'].strip():
text = options["members"]
prefix = ('%s::' % outerclass) if outerclass else ''
# Matches sphinx-autodoc behaviour of comma separated values
members = set(['%s%s' % (prefix, x.strip()) for x in text.split(",")])
node_valueOf_is_in_members = node.valueOf.is_one_of(members)
# Accept any nodes which don't have a "sectiondef" as a parent or, if they do, only
# accept them if their names are in the members list
public_innerclass_filter = ~node_is_innerclass_in_class | node_valueOf_is_in_members
else:
allowed.add('public')
node_is_in_allowed_scope = node.prot.is_one_of(allowed)
innerclass = ~ node_is_innerclass_in_class | node_is_in_allowed_scope
description = self._create_description_filter(True, 'compounddef', options)
# Put parent check last as we only want to check parents of innerclass's otherwise we have
# to check the parent's type as well
return innerclass | public_innerclass_filter | description
    def create_show_filter(self, options):
        """Currently only handles the header-file entry"""
        try:
            text = options["show"]
        except KeyError:
            # Allow through everything except the header-file includes nodes
            return OrFilter(
                NotFilter(InFilter(NodeTypeAccessor(Parent()), ["compounddef"])),
                NotFilter(InFilter(NodeTypeAccessor(Node()), ["inc"]))
                )

        if text == "header-file":
            # Allow through everything, including header-file includes
            return OpenFilter()

        # Allow through everything except the header-file includes nodes
        # (same expression as the KeyError branch above)
        return OrFilter(
            NotFilter(InFilter(NodeTypeAccessor(Parent()), ["compounddef"])),
            NotFilter(InFilter(NodeTypeAccessor(Node()), ["inc"]))
            )
    def _create_description_filter(self, allow, level, options):
        """Whether or not we allow descriptions is determined by the calling function and we just do
        whatever the 'allow' function parameter tells us.
        """
        node = Node()
        node_is_description = node.node_type == 'description'
        parent = Parent()
        parent_is_level = parent.node_type == level

        # Nothing with a parent that's a sectiondef
        description_filter = ~ parent_is_level

        # Let through any description children of sectiondefs if we output any kind members
        if allow:
            description_filter = \
                (parent_is_level & node_is_description) | ~ parent_is_level

        return description_filter
    def _create_public_members_filter(self, options):
        """Filter selecting public memberdefs, honouring the :members: option
        (with or without an explicit name list)."""
        node = Node()
        node_is_memberdef = node.node_type == "memberdef"
        node_is_public = node.prot == "public"

        parent = Parent()
        parent_is_sectiondef = parent.node_type == "sectiondef"

        # Nothing with a parent that's a sectiondef
        is_memberdef = parent_is_sectiondef & node_is_memberdef
        public_members_filter = ~ is_memberdef

        # If the user has specified the 'members' option with arguments then we only pay attention
        # to that and not to any other member settings
        if "members" in options:
            if options['members'].strip():
                text = options["members"]

                # Matches sphinx-autodoc behaviour of comma separated values
                members = set([x.strip() for x in text.split(",")])

                node_name_is_in_members = node.name.is_one_of(members)

                # Accept any nodes which don't have a "sectiondef" as a parent or, if they do, only
                # accept them if their names are in the members list
                public_members_filter = \
                    (parent_is_sectiondef & node_name_is_in_members) | ~ parent_is_sectiondef
            else:
                # Select anything that doesn't have a parent which is a sectiondef, or, if it does,
                # only select the public ones
                public_members_filter = \
                    (is_memberdef & node_is_public) | ~ is_memberdef

        return public_members_filter
    def _create_non_public_members_filter(self, prot, option_name, options):
        """'prot' is the doxygen xml term for 'public', 'protected' and 'private' categories."""
        node = Node()
        node_is_memberdef = node.node_type == "memberdef"
        # NOTE(review): despite the name, this tests against the *prot*
        # argument ('protected' or 'private' at the call sites), not 'public'.
        node_is_public = node.prot == prot

        parent = Parent()
        parent_is_sectiondef = parent.node_type == "sectiondef"

        # Nothing with a parent that's a sectiondef
        is_memberdef = parent_is_sectiondef & node_is_memberdef
        filter_ = ~ is_memberdef

        if option_name in options:
            # Allow anything that isn't a memberdef, or if it is only allow the public ones
            filter_ = ~ is_memberdef | node_is_public

        return filter_
    def _create_undoc_members_filter(self, options):
        """Reject undocumented memberdefs unless :undoc-members: is given."""
        node = Node()
        node_is_memberdef = node.node_type == 'memberdef'

        node_has_description = node.briefdescription.has_content() \
            | node.detaileddescription.has_content()

        # Allow anything that isn't a memberdef, or if it is only allow the ones with a description
        undoc_members_filter = ~ node_is_memberdef | node_has_description

        if 'undoc-members' in options:
            undoc_members_filter = OpenFilter()

        return undoc_members_filter
    def create_class_member_filter(self, options):
        """Content filter based on :members: and :private-members: classes"""

        # I can't fully explain the filtering of descriptions here. More testing needed to figure
        # out when it is needed. This approach reflects the old code that was here but it wasn't
        # commented (my fault.) I wonder if maybe the public and private declarations themselves can
        # be documented and we need to let them through. Not sure.
        allow = 'members' in options \
            or 'protected-members' in options \
            or 'private-members' in options

        description = self._create_description_filter(allow, 'sectiondef', options)

        # Create all necessary filters and combine them
        public_members = self._create_public_members_filter(options)

        protected_members = self._create_non_public_members_filter(
            'protected',
            'protected-members',
            options
            )

        private_members = self._create_non_public_members_filter(
            'private',
            'private-members',
            options
            )

        undoc_members = self._create_undoc_members_filter(options)

        # Allow any public/private members which also fit the undoc filter and all the descriptions
        allowed_members = (public_members | protected_members | private_members) & undoc_members

        return allowed_members | description
    def create_outline_filter(self, options):
        """When :outline: is given, strip descriptions and includes so only
        the declarations are rendered; otherwise let everything through."""
        if 'outline' in options:
            node = Node()
            return ~ node.node_type.is_one_of(["description", "inc"])
        else:
            return OpenFilter()
    def create_file_filter(self, filename, options):
        """Build the content filter used when rendering a single file.

        Combines five sub-filters (each wrapped in NotFilter so a match means
        "exclude") plus the outline filter.  The first sub-filter is a
        side-effecting trick: it never excludes anything, it only *gathers*
        the namespaces declared in this file into ``valid_names`` so the
        later sub-filters can consult them.

        :param filename: path of the file being rendered.
        :param options: directive options dict (consulted for :outline:).
        """
        valid_names = []

        filter_ = AndFilter(
            NotFilter(
                # Gather the "namespaces" attribute from the
                # compounddef for the file we're rendering and
                # store the information in the "valid_names" list
                #
                # Gather always returns false, so, combined with
                # the NotFilter this chunk always returns true and
                # so does not affect the result of the filtering
                AndFilter(
                    InFilter(NodeTypeAccessor(Node()), ["compounddef"]),
                    InFilter(KindAccessor(Node()), ["file"]),
                    FilePathFilter(
                        LambdaAccessor(Node(), lambda x: x.location),
                        filename, self.path_handler
                    ),
                    Gather(LambdaAccessor(Node(), lambda x: x.namespaces), valid_names)
                )
            ),
            NotFilter(
                # Take the valid_names and everytime we handle an
                # innerclass or innernamespace, check that its name
                # was one of those initial valid names so that we
                # never end up rendering a namespace or class that
                # wasn't in the initial file. Notably this is
                # required as the location attribute for the
                # namespace in the xml is unreliable.
                AndFilter(
                    InFilter(NodeTypeAccessor(Parent()), ["compounddef"]),
                    InFilter(NodeTypeAccessor(Node()), ["ref"]),
                    InFilter(NodeNameAccessor(Node()), ["innerclass", "innernamespace"]),
                    NotFilter(
                        InFilter(
                            LambdaAccessor(Node(), lambda x: x.content_[0].getValue()),
                            valid_names
                        )
                    )
                )
            ),
            NotFilter(
                # Ignore innerclasses and innernamespaces that are inside a
                # namespace that is going to be rendered as they will be
                # rendered with that namespace and we don't want them twice
                AndFilter(
                    InFilter(NodeTypeAccessor(Parent()), ["compounddef"]),
                    InFilter(NodeTypeAccessor(Node()), ["ref"]),
                    InFilter(NodeNameAccessor(Node()), ["innerclass", "innernamespace"]),
                    NamespaceFilter(
                        NamespaceAccessor(Parent()),
                        LambdaAccessor(Node(), lambda x: x.content_[0].getValue())
                    )
                )
            ),
            NotFilter(
                # Ignore memberdefs from files which are different to
                # the one we're rendering. This happens when we have to
                # cross into a namespace xml file which has entries
                # from multiple files in it
                AndFilter(
                    InFilter(NodeTypeAccessor(Node()), ["memberdef"]),
                    NotFilter(
                        FilePathFilter(LambdaAccessor(Node(), lambda x: x.location),
                                       filename, self.path_handler)
                    )
                )
            ),
            NotFilter(
                # Ignore compounddefs which are from another file
                # (normally means classes and structs which are in a
                # namespace that we have other interests in) but only
                # check it if the compounddef is not a namespace
                # itself, as for some reason compounddefs for
                # namespaces are registered with just a single file
                # location even if they namespace is spread over
                # multiple files
                AndFilter(
                    InFilter(NodeTypeAccessor(Node()), ["compounddef"]),
                    NotFilter(InFilter(KindAccessor(Node()), ["namespace"])),
                    NotFilter(
                        FilePathFilter(LambdaAccessor(Node(), lambda x: x.location),
                                       filename, self.path_handler)
                    )
                )
            )
        )
        # Finally honour the :outline: option on top of the exclusions above.
        return AndFilter(
            self.create_outline_filter(options),
            filter_
        )
def create_content_filter(self, kind, options):
"""Returns a filter which matches the contents of the or namespace but not the group or
namepace name or description.
This allows the groups to be used to structure sections of the documentation rather than to
structure and further document groups of documentation
As a finder/content filter we only need to match exactly what we're interested in.
"""
if kind not in ['group', 'namespace']:
raise UnrecognisedKindError(kind)
node = Node()
# Filter for public memberdefs
node_is_memberdef = node.node_type == 'memberdef'
node_is_public = node.prot == 'public'
public_members = node_is_memberdef & node_is_public
# Filter for public innerclasses
parent = Parent()
parent_is_compounddef = parent.node_type == 'compounddef'
parent_is_class = parent.kind == kind
node_is_innerclass = (node.node_type == "ref") & (node.node_name == "innerclass")
node_is_public = node.prot == 'public'
public_innerclass = parent_is_compounddef & parent_is_class \
& node_is_innerclass & node_is_public
return public_members | public_innerclass
def create_index_filter(self, options):
filter_ = AndFilter(
NotFilter(
AndFilter(
InFilter(NodeTypeAccessor(Parent()), ["compounddef"]),
InFilter(NodeTypeAccessor(Node()), ["ref"]),
InFilter(NodeNameAccessor(Node()), ["innerclass", "innernamespace"])
)
),
NotFilter(
AndFilter(
InFilter(NodeTypeAccessor(Parent()), ["compounddef"]),
InFilter(KindAccessor(Parent()), ["group"]),
InFilter(NodeTypeAccessor(Node()), ["sectiondef"]),
InFilter(KindAccessor(Node()), ["func"])
)
)
)
return AndFilter(
self.create_outline_filter(options),
filter_
)
    def create_open_filter(self):
        """Returns a completely open filter which matches everything.

        Used wherever no restriction on the rendered content is wanted."""
        return OpenFilter()
def create_id_filter(self, node_type, refid):
node = Node()
return (node.node_type == node_type) & (node.id == refid)
def create_file_finder_filter(self, filename):
filter_ = AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compounddef"]),
InFilter(KindAccessor(Node()), ["file"]),
FilePathFilter(LambdaAccessor(Node(), lambda x: x.location), filename,
self.path_handler)
)
return filter_
    def create_member_finder_filter(self, namespace, name, kind):
        """Returns a filter which looks for a member with the specified name and kind.

        If ``namespace`` is given, the member's parent compound must be a
        namespace or class with that exact name.  Otherwise the member may
        belong to any non-file compound, or to a file compound that is *not*
        an implementation file (judged by the configured
        ``implementation_filename_extensions``) -- presumably so that header
        declarations win over definitions; TODO confirm against callers.
        """
        node = Node()
        parent = Parent()

        node_matches = (node.node_type == 'member') \
            & (node.kind == kind) \
            & (node.name == name)

        if namespace:
            parent_matches = (parent.node_type == 'compound') \
                & ((parent.kind == 'namespace') | (parent.kind == 'class')) \
                & (parent.name == namespace)
            return parent_matches & node_matches
        else:
            # str.endswith accepts the tuple of configured extensions.
            is_implementation_file = parent.name.endswith(self.implementation_filename_extensions)
            parent_is_compound = parent.node_type == 'compound'
            parent_is_file = (parent.kind == 'file') & (~ is_implementation_file)
            parent_is_not_file = parent.kind != 'file'
            return (parent_is_compound & parent_is_file & node_matches) \
                | (parent_is_compound & parent_is_not_file & node_matches)
def create_function_finder_filter(self, namespace, name):
parent = Parent()
parent_is_compound = parent.node_type == 'compound'
parent_is_group = parent.kind == 'group'
function_filter = self.create_member_finder_filter(namespace, name, 'function')
# Get matching functions but only ones where the parent is not a group. We want to skip
# function entries in groups as we'll find the same functions in a file's xml output
# elsewhere and having more than one match is confusing for our logic later on.
return function_filter & ~(parent_is_compound & parent_is_group)
def create_enumvalue_finder_filter(self, name):
"""Returns a filter which looks for an enumvalue with the specified name."""
node = Node()
return (node.node_type == 'enumvalue') & (node.name == name)
def create_compound_finder_filter(self, name, kind):
"""Returns a filter which looks for a compound with the specified name and kind."""
node = Node()
return (node.node_type == 'compound') & (node.kind == kind) & (node.name == name)
def create_finder_filter(self, kind, name):
"""Returns a filter which looks for the compound node from the index which is a group node
(kind=group) and has the appropriate name
The compound node should reference the group file which we can parse for the group
contents.
"""
if kind == 'group':
filter_ = AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compound"]),
InFilter(KindAccessor(Node()), ["group"]),
InFilter(NameAccessor(Node()), [name])
)
else:
# Assume kind == 'namespace'
filter_ = AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compound"]),
InFilter(KindAccessor(Node()), ["namespace"]),
InFilter(NameAccessor(Node()), [name])
)
return filter_
    def get_config_values(self, app):
        """Extract the breathe_default_members config value and store it.

        This method is called on the 'builder-init' event in Sphinx"""
        # Cache the config values so later filter construction does not need
        # access to the Sphinx application object.
        self.default_members = app.config.breathe_default_members
        self.implementation_filename_extensions = \
            app.config.breathe_implementation_filename_extensions
| bsd-3-clause |
inspyration/odoo | addons/l10n_be_hr_payroll_account/__openerp__.py | 298 | 1626 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Belgium - Payroll with Accounting',
'category': 'Localization',
'author': 'OpenERP SA',
'depends': ['l10n_be_hr_payroll', 'hr_payroll_account', 'l10n_be'],
'version': '1.0',
'description': """
Accounting Data for Belgian Payroll Rules.
==========================================
""",
'auto_install': True,
'website': 'https://www.odoo.com/page/accounting',
'demo': [],
'data':[
'l10n_be_wizard.yml',
'l10n_be_hr_payroll_account_data.xml',
'data/hr.salary.rule.csv',
],
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
shravya-ks/ns-3-tcp-prague | doc/tutorial/source/conf.py | 72 | 7057 | # -*- coding: utf-8 -*-
#
# ns-3 documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 14 09:00:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE(review): sphinx.ext.pngmath was removed in Sphinx 1.8 in favour of
# sphinx.ext.imgmath -- confirm which Sphinx version this build pins.
extensions = ['sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ns-3'
copyright = u'2010, ns-3 project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'ns-3-dev'
# The full version, including alpha/beta/rc tags.
release = 'ns-3-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'ns3_html_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# ('../..' points at the repository doc root, where ns3_html_theme lives.)
html_theme_path = ['../..']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Tutorial'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format. (Here: e.g. "Jan 01, 2020 12:00".)
html_last_updated_fmt = '%b %d, %Y %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'ns-3-tutorial.tex', u'ns-3 Tutorial',
   u'ns-3 project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '../../ns3_html_theme/static/ns-3.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# Raw string required: '\u' in a plain string literal is a SyntaxError on
# Python 3 (and only accidentally a literal backslash-u on Python 2); the
# raw form is byte-identical under both interpreters.
latex_preamble = r'\usepackage{amssymb}'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'ns-3-tutorial', u'ns-3 Tutorial',
     [u'ns-3 project'], 1)
]
| gpl-2.0 |
jjgoings/McMurchie-Davidson | mmd/utils/spectrum.py | 1 | 3029 | from __future__ import division
import numpy as np
"""Contains some routines to do the (Pade approximant) Fourier transform
as well as some peak-finding routines, useful for post processing a
real-time calculation
"""
def genSpectra(time, dipole, signal):
    """Build an absorption spectrum from a dipole trace and a field signal.

    Both inputs are Fourier transformed via the Pade approximant; the
    spectrum is Im(FT(dipole)) scaled by |FT(signal)| together with the
    physical constants visible below (27.21138602 Hartree-to-eV and the
    fine-structure denominator 137.036).
    """
    fw_dipole, frequency = pade(time, dipole)
    fw_signal, _ = pade(time, signal)
    numerator = np.imag(fw_dipole)
    denominator = np.abs(fw_signal)
    # Kept as a single expression so the floating-point evaluation order is
    # unchanged.
    spectra = ((4.0*27.21138602*2*frequency*np.pi*(numerator))/(3.0*137.036*denominator))
    return frequency, spectra
def pade(time, dipole):
    """Fourier transform *dipole* using the Pade approximant method.

    Args:
        time: 1-D array of uniformly spaced time points.
        dipole: 1-D array of dipole values at those times.

    Returns:
        Tuple ``(fw, frequency)``: the complex spectrum evaluated on the
        fixed grid ``np.arange(0.0, 2.0, 0.0001)`` (atomic units).

    Raises:
        ImportError: if SciPy >= 0.17.0 (scipy.linalg.solve_toeplitz) is
            not available.
    """
    try:
        from scipy.linalg import toeplitz, solve_toeplitz
    except ImportError:
        # Previously this only printed a warning and then crashed later with
        # a confusing NameError; fail loudly and early instead.
        raise ImportError("pade() requires SciPy >= 0.17.0 "
                          "(scipy.linalg.solve_toeplitz)")
    damp_const = 50.0
    # Work on a float copy so integer input cannot break the damping
    # multiplication below (and the caller's array is never mutated).
    dipole = np.asarray(dipole, dtype=float) - dipole[0]
    stepsize = time[1] - time[0]
    # Exponential damping regularises the signal before the rational fit.
    damp = np.exp(-(stepsize*np.arange(len(dipole)))/float(damp_const))
    dipole = dipole * damp
    M = len(dipole)
    N = int(np.floor(M / 2))
    # Cap the system size to keep the solve tractable.
    num_pts = 20000
    if N > num_pts:
        N = num_pts
    # G and d are (N-1) x (N-1)
    # d[k] = -dipole[N+k] for k in range(1,N)
    d = -dipole[N+1:2*N]
    try:
        # G is Toeplitz with first column dipole[N:2N-1] and first row
        # [dipole[1], dipole[N-1], ..., dipole[2]].
        b = solve_toeplitz((dipole[N:2*N-1],
                            np.hstack((dipole[1], dipole[N-1:1:-1]))),
                           d, check_finite=False)
    except np.linalg.LinAlgError:
        # Fallback: build G explicitly; sometimes more stable.
        # G[k,m] = dipole[N - m + k] for m,k in range(1,N)
        # (np.linalg.LinAlgError is the supported name; the old
        # np.linalg.linalg.LinAlgError alias is deprecated.)
        G = dipole[N + np.arange(1, N)[:, None] - np.arange(1, N)]
        b = np.linalg.solve(G, d)
    # Prepend b0 = 1 so b holds the denominator polynomial coefficients.
    b = np.hstack((1, b))
    # a[k] = sum_m b[m]*dipole[k-m] for k in range(0,N), for m in range(k)
    a = np.dot(np.tril(toeplitz(dipole[0:N])), b)
    p = np.poly1d(a)
    q = np.poly1d(b)
    # If you want energies greater than 2*27.2114 eV, you'll need to change
    # the default frequency range to something greater.
    frequency = np.arange(0.00, 2.0, 0.0001)
    W = np.exp(-1j*frequency*stepsize)
    fw = p(W)/q(W)
    return fw, frequency
def peaks(spectra, frequency, number=3, thresh=0.01):
    """Print and return the dominant peaks of a Fourier-transformed spectrum.

    Variables:
    number:      integer. number of peaks to print/return.
    thresh:      float. Threshhold intensity for printing.

    Returns: list of (energy_eV, intensity) tuples, in ascending frequency
    order -- energy is frequency * 27.2114 (a.u. to eV), matching what is
    printed.  (The original version only printed and returned None.)
    """
    from scipy.signal import argrelextrema as pks
    # find all peak indices [idx], and remove those below thresh [jdx]
    idx = pks(np.abs(spectra), np.greater, order=3)
    jdx = np.where((np.abs(spectra[idx]) >= thresh))
    kdx = idx[0][jdx[0]]  # indices of peaks matching criteria
    if number > len(kdx):
        number = len(kdx)
    print("First "+str(number)+" peaks (eV) found: ")
    results = []
    # range() here: the original used xrange, which is Python-2-only.
    for i in range(number):
        energy = frequency[kdx][i] * 27.2114
        intensity = spectra[kdx][i]
        print("{0:.4f}".format(energy),
              "{0:.4f}".format(intensity))
        results.append((energy, intensity))
    return results
| bsd-3-clause |
danielfrg/remote-pip | rpip/tests/test_output.py | 1 | 1204 | from rpip.output import Output
# Canned per-host command results: one success and one failure.
exit0 = {'exit_code': 0, 'stdout': 'yes', 'stderr': ''}
exit1 = {'exit_code': 1, 'stdout': '', 'stderr': 'ERROR'}
# Host -> result fixtures: all hosts succeed / one fails / two fail.
o0 = {'host1': exit0, 'host2': exit0, 'host3': exit0}
o1 = {'host1': exit0, 'host2': exit1, 'host3': exit0}
o2 = {'host1': exit0, 'host2': exit1, 'host3': exit1}
def test_groupby():
    """All hosts returned the same result, so groupby yields one group."""
    grouped = Output(o0).groupby()
    assert len(grouped) == 1
    hosts, result = grouped[0]
    assert len(hosts) == 3
    assert hosts == ['host3', 'host2', 'host1']
    assert result == exit0
def test_groupby2():
    """One failing host splits the output into two groups."""
    grouped = Output(o1).groupby()
    assert len(grouped) == 2

    ok_hosts, ok_result = grouped[0]
    assert len(ok_hosts) == 2
    assert ok_hosts == ['host3', 'host1']
    assert ok_result == exit0

    bad_hosts, bad_result = grouped[1]
    assert len(bad_hosts) == 1
    assert bad_hosts == ['host2']
    assert bad_result == exit1
def test_groupby3():
    """With two failing hosts the failure group comes first."""
    grouped = Output(o2).groupby()
    assert len(grouped) == 2

    bad_hosts, bad_result = grouped[0]
    assert len(bad_hosts) == 2
    assert bad_hosts == ['host3', 'host2']
    assert bad_result == exit1

    ok_hosts, ok_result = grouped[1]
    assert len(ok_hosts) == 1
    assert ok_hosts == ['host1']
    assert ok_result == exit0
| apache-2.0 |
yitian134/chromium | third_party/protobuf/python/google/protobuf/internal/python_message.py | 259 | 40284 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This code is meant to work on Python 2.4 and above only.
#
# TODO(robinson): Helpers for verbose, common checks like seeing if a
# descriptor's cpp_type is CPPTYPE_MESSAGE.
"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.
Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)
In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.
The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""
__author__ = 'robinson@google.com (Will Robinson)'
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import struct
import weakref
# We use "as" to avoid name collisions with variables.
from google.protobuf.internal import containers
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import message_listener as message_listener_mod
from google.protobuf.internal import type_checkers
from google.protobuf.internal import wire_format
from google.protobuf import descriptor as descriptor_mod
from google.protobuf import message as message_mod
from google.protobuf import text_format
_FieldDescriptor = descriptor_mod.FieldDescriptor
def NewMessage(descriptor, dictionary):
  """Prepares the class dictionary for a new message type before the class
  object itself is created: registers nested extension attributes and adds
  the __slots__ entry.

  Args:
    descriptor: Descriptor object describing this message type.
    dictionary: The class dictionary under construction.
  """
  _AddClassAttributesForNestedExtensions(descriptor, dictionary)
  _AddSlots(descriptor, dictionary)
def InitMessage(descriptor, cls):
  """Finishes initialization of a freshly created message class.

  Attaches decoder/extension registries, per-field helpers, enum value
  attributes, properties and the standard message methods to cls.  Must run
  after the class object exists (NewMessage only prepared the dictionary).

  Args:
    descriptor: Descriptor object for this message type.
    cls: The generated message class.
  """
  cls._decoders_by_tag = {}
  cls._extensions_by_name = {}
  cls._extensions_by_number = {}
  # MessageSet-formatted messages need a special item decoder keyed by the
  # fixed MessageSet item tag.
  if (descriptor.has_options and
      descriptor.GetOptions().message_set_wire_format):
    cls._decoders_by_tag[decoder.MESSAGE_SET_ITEM_TAG] = (
        decoder.MessageSetItemDecoder(cls._extensions_by_number))
  # Attach stuff to each FieldDescriptor for quick lookup later on.
  for field in descriptor.fields:
    _AttachFieldHelpers(cls, field)
  _AddEnumValues(descriptor, cls)
  _AddInitMethod(descriptor, cls)
  _AddPropertiesForFields(descriptor, cls)
  _AddPropertiesForExtensions(descriptor, cls)
  _AddStaticMethods(cls)
  _AddMessageMethods(descriptor, cls)
  _AddPrivateHelperMethods(cls)
# Stateless helpers for GeneratedProtocolMessageType below.
# Outside clients should not access these directly.
#
# I opted not to make any of these methods on the metaclass, to make it more
# clear that I'm not really using any state there and to keep clients from
# thinking that they have direct access to these construction helpers.
def _PropertyName(proto_field_name):
"""Returns the name of the public property attribute which
clients can use to get and (in some cases) set the value
of a protocol message field.
Args:
proto_field_name: The protocol message field name, exactly
as it appears (or would appear) in a .proto file.
"""
# TODO(robinson): Escape Python keywords (e.g., yield), and test this support.
# nnorwitz makes my day by writing:
# """
# FYI. See the keyword module in the stdlib. This could be as simple as:
#
# if keyword.iskeyword(proto_field_name):
# return proto_field_name + "_"
# return proto_field_name
# """
# Kenton says: The above is a BAD IDEA. People rely on being able to use
# getattr() and setattr() to reflectively manipulate field values. If we
# rename the properties, then every such user has to also make sure to apply
# the same transformation. Note that currently if you name a field "yield",
# you can still access it just fine using getattr/setattr -- it's not even
# that cumbersome to do so.
# TODO(kenton): Remove this method entirely if/when everyone agrees with my
# position.
return proto_field_name
def _VerifyExtensionHandle(message, extension_handle):
  """Verify that the given extension handle is valid.

  Raises:
    KeyError: if the handle is not a FieldDescriptor, is not an extension,
      or extends a message type other than that of |message|.
  """
  if not isinstance(extension_handle, _FieldDescriptor):
    raise KeyError('HasExtension() expects an extension handle, got: %s' %
                   extension_handle)
  if not extension_handle.is_extension:
    raise KeyError('"%s" is not an extension.' % extension_handle.full_name)
  # Identity comparison on purpose: the handle must reference this exact
  # Descriptor object, not merely an equal-looking one.
  if extension_handle.containing_type is not message.DESCRIPTOR:
    raise KeyError('Extension "%s" extends message type "%s", but this '
                   'message is of type "%s".' %
                   (extension_handle.full_name,
                    extension_handle.containing_type.full_name,
                    message.DESCRIPTOR.full_name))
def _AddSlots(message_descriptor, dictionary):
  """Adds a __slots__ entry to dictionary, containing the names of all valid
  attributes for this message type.

  __slots__ keeps per-instance memory low and prevents accidental creation
  of unknown attributes on message instances.

  Args:
    message_descriptor: A Descriptor instance describing this message type.
    dictionary: Class dictionary to which we'll add a '__slots__' entry.
  """
  # NOTE: message_descriptor is unused -- the slot set is identical for
  # every generated message type (fields live in the _fields dict).
  dictionary['__slots__'] = ['_cached_byte_size',
                             '_cached_byte_size_dirty',
                             '_fields',
                             '_is_present_in_parent',
                             '_listener',
                             '_listener_for_children',
                             '__weakref__']
def _IsMessageSetExtension(field):
return (field.is_extension and
field.containing_type.has_options and
field.containing_type.GetOptions().message_set_wire_format and
field.type == _FieldDescriptor.TYPE_MESSAGE and
field.message_type == field.extension_scope and
field.label == _FieldDescriptor.LABEL_OPTIONAL)
def _AttachFieldHelpers(cls, field_descriptor):
  """Precomputes encode/size/decode helpers for one field.

  Stores the encoder, sizer and default-value constructor on the
  FieldDescriptor itself (as _encoder/_sizer/_default_constructor) and
  registers wire-tag-keyed decoders on cls._decoders_by_tag.
  """
  is_repeated = (field_descriptor.label == _FieldDescriptor.LABEL_REPEATED)
  is_packed = (field_descriptor.has_options and
               field_descriptor.GetOptions().packed)
  # MessageSet items use a special wire format with dedicated helpers.
  if _IsMessageSetExtension(field_descriptor):
    field_encoder = encoder.MessageSetItemEncoder(field_descriptor.number)
    sizer = encoder.MessageSetItemSizer(field_descriptor.number)
  else:
    field_encoder = type_checkers.TYPE_TO_ENCODER[field_descriptor.type](
        field_descriptor.number, is_repeated, is_packed)
    sizer = type_checkers.TYPE_TO_SIZER[field_descriptor.type](
        field_descriptor.number, is_repeated, is_packed)
  field_descriptor._encoder = field_encoder
  field_descriptor._sizer = sizer
  field_descriptor._default_constructor = _DefaultValueConstructorForField(
      field_descriptor)
  def AddDecoder(wiretype, is_packed):
    # Decoders are looked up by the already-encoded tag bytes.
    tag_bytes = encoder.TagBytes(field_descriptor.number, wiretype)
    cls._decoders_by_tag[tag_bytes] = (
        type_checkers.TYPE_TO_DECODER[field_descriptor.type](
            field_descriptor.number, is_repeated, is_packed,
            field_descriptor, field_descriptor._default_constructor))
  AddDecoder(type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type],
             False)
  if is_repeated and wire_format.IsTypePackable(field_descriptor.type):
    # To support wire compatibility of adding packed = true, add a decoder for
    # packed values regardless of the field's options.
    AddDecoder(wire_format.WIRETYPE_LENGTH_DELIMITED, True)
def _AddClassAttributesForNestedExtensions(descriptor, dictionary):
extension_dict = descriptor.extensions_by_name
for extension_name, extension_field in extension_dict.iteritems():
assert extension_name not in dictionary
dictionary[extension_name] = extension_field
def _AddEnumValues(descriptor, cls):
"""Sets class-level attributes for all enum fields defined in this message.
Args:
descriptor: Descriptor object for this message type.
cls: Class we're constructing for this message type.
"""
for enum_type in descriptor.enum_types:
for enum_value in enum_type.values:
setattr(cls, enum_value.name, enum_value.number)
def _DefaultValueConstructorForField(field):
  """Returns a function which returns a default value for a field.

  Args:
    field: FieldDescriptor object for this field.

  The returned function has one argument:
    message: Message instance containing this field, or a weakref proxy
      of same.

  That function in turn returns a default value for this field.  The default
  value may refer back to |message| via a weak reference.
  """
  if field.label == _FieldDescriptor.LABEL_REPEATED:
    # Repeated fields must default to an empty container; a non-empty proto
    # default would be a descriptor bug.
    if field.default_value != []:
      raise ValueError('Repeated field default value not empty list: %s' % (
          field.default_value))
    if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
      # We can't look at _concrete_class yet since it might not have
      # been set. (Depends on order in which we initialize the classes).
      # NOTE(review): message_type is unused here; the closure reads
      # field.message_type directly.
      message_type = field.message_type
      def MakeRepeatedMessageDefault(message):
        return containers.RepeatedCompositeFieldContainer(
            message._listener_for_children, field.message_type)
      return MakeRepeatedMessageDefault
    else:
      type_checker = type_checkers.GetTypeChecker(field.cpp_type, field.type)
      def MakeRepeatedScalarDefault(message):
        return containers.RepeatedScalarFieldContainer(
            message._listener_for_children, type_checker)
      return MakeRepeatedScalarDefault
  if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    # _concrete_class may not yet be initialized, so capture the descriptor
    # and resolve the concrete class lazily inside the closure.
    message_type = field.message_type
    def MakeSubMessageDefault(message):
      result = message_type._concrete_class()
      result._SetListener(message._listener_for_children)
      return result
    return MakeSubMessageDefault
  def MakeScalarDefault(message):
    # Plain scalars just use the descriptor's default value.
    return field.default_value
  return MakeScalarDefault
def _AddInitMethod(message_descriptor, cls):
  """Adds an __init__ method to cls.

  The generated __init__ accepts field values as keyword arguments and
  installs the listener plumbing used for change notification.
  """
  # NOTE(review): 'fields' is assigned but unused; the closure below reads
  # message_descriptor directly.
  fields = message_descriptor.fields
  def init(self, **kwargs):
    self._cached_byte_size = 0
    self._cached_byte_size_dirty = len(kwargs) > 0
    self._fields = {}
    self._is_present_in_parent = False
    self._listener = message_listener_mod.NullMessageListener()
    self._listener_for_children = _Listener(self)
    # NOTE: .iteritems() makes this Python-2-only, consistent with the rest
    # of this module.
    for field_name, field_value in kwargs.iteritems():
      field = _GetFieldByName(message_descriptor, field_name)
      # NOTE(review): _GetFieldByName raises ValueError rather than
      # returning None, so this TypeError branch appears unreachable --
      # confirm before relying on the TypeError being raised.
      if field is None:
        raise TypeError("%s() got an unexpected keyword argument '%s'" %
                        (message_descriptor.name, field_name))
      if field.label == _FieldDescriptor.LABEL_REPEATED:
        # Copy the provided values into a fresh container so the kwarg
        # object itself is never shared with the message.
        copy = field._default_constructor(self)
        if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:  # Composite
          for val in field_value:
            copy.add().MergeFrom(val)
        else:  # Scalar
          copy.extend(field_value)
        self._fields[field] = copy
      elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        copy = field._default_constructor(self)
        copy.MergeFrom(field_value)
        self._fields[field] = copy
      else:
        # Scalar singular fields go through the property setter for type
        # checking.
        setattr(self, field_name, field_value)
  init.__module__ = None
  init.__doc__ = None
  cls.__init__ = init
def _GetFieldByName(message_descriptor, field_name):
"""Returns a field descriptor by field name.
Args:
message_descriptor: A Descriptor describing all fields in message.
field_name: The name of the field to retrieve.
Returns:
The field descriptor associated with the field name.
"""
try:
return message_descriptor.fields_by_name[field_name]
except KeyError:
raise ValueError('Protocol message has no "%s" field.' % field_name)
def _AddPropertiesForFields(descriptor, cls):
  """Adds properties for all fields in this protocol message type."""
  for field_descriptor in descriptor.fields:
    _AddPropertiesForField(field_descriptor, cls)

  if descriptor.is_extendable:
    # _ExtensionDict is just an adaptor with no state, so a fresh instance
    # is created on every access of the Extensions attribute.
    cls.Extensions = property(lambda self: _ExtensionDict(self))
def _AddPropertiesForField(field, cls):
  """Adds a public property for a protocol message field.
  Clients can use this property to get and (in the case
  of non-repeated scalar fields) directly set the value
  of a protocol message field.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  # Catch it if we add other types that we should
  # handle specially here.
  assert _FieldDescriptor.MAX_CPPTYPE == 10

  # Expose the field number as a class constant, e.g. FOO_FIELD_NUMBER.
  setattr(cls, field.name.upper() + "_FIELD_NUMBER", field.number)

  # Dispatch on the field's shape: repeated, submessage, or plain scalar.
  if field.label == _FieldDescriptor.LABEL_REPEATED:
    _AddPropertiesForRepeatedField(field, cls)
  elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    _AddPropertiesForNonRepeatedCompositeField(field, cls)
  else:
    _AddPropertiesForNonRepeatedScalarField(field, cls)
def _AddPropertiesForRepeatedField(field, cls):
  """Adds a public property for a "repeated" protocol message field. Clients
  can use this property to get the value of the field, which will be either a
  _RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see
  below).

  Note that when clients add values to these containers, we perform
  type-checking in the case of repeated scalar fields, and we also set any
  necessary "has" bits as a side-effect.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)

  def getter(self):
    value = self._fields.get(field)
    if value is None:
      # The container is constructed lazily on first access.
      value = field._default_constructor(self)
      # Atomically publish the new container; if another thread has
      # preempted us, adopt its container and discard ours.
      # WARNING: We are relying on setdefault() being atomic.  This is true
      # in CPython but we haven't investigated others.  This warning appears
      # in several other locations in this file.
      value = self._fields.setdefault(field, value)
    return value
  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  def setter(self, new_value):
    # Direct assignment is disallowed for repeated fields; this setter exists
    # only to raise a helpful error message.
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % proto_field_name)

  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedScalarField(field, cls):
  """Adds a public property for a nonrepeated, scalar protocol message field.
  Clients can use this property to get and directly set the value of the field.
  Note that when the client sets the value of a field by using this property,
  all necessary "has" bits are set as a side-effect, and we also perform
  type-checking.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)
  type_checker = type_checkers.GetTypeChecker(field.cpp_type, field.type)
  default_value = field.default_value
  # (Fix: removed the unused local 'valid_values = set()' -- it was dead code.)

  def getter(self):
    # Unset fields read as the shared default value.
    return self._fields.get(field, default_value)
  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  def setter(self, new_value):
    type_checker.CheckValue(new_value)
    self._fields[field] = new_value
    # Check _cached_byte_size_dirty inline to improve performance, since scalar
    # setters are called frequently.
    if not self._cached_byte_size_dirty:
      self._Modified()
  setter.__module__ = None
  setter.__doc__ = 'Setter for %s.' % proto_field_name

  # Add a property to encapsulate the getter/setter.
  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedCompositeField(field, cls):
  """Adds a public property for a nonrepeated, composite protocol message field.
  A composite field is a "group" or "message" field.

  Clients can use this property to get the value of the field, but cannot
  assign to the property directly.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  # TODO(robinson): Remove duplication with similar method
  # for non-repeated scalars.
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)
  message_type = field.message_type

  def getter(self):
    value = self._fields.get(field)
    if value is None:
      # Lazily build the submessage and register our child listener with it.
      value = message_type._concrete_class()
      value._SetListener(self._listener_for_children)
      # Atomically publish the new submessage; if another thread has
      # preempted us, adopt its object and discard ours.
      # WARNING: We are relying on setdefault() being atomic.  This is true
      # in CPython but we haven't investigated others.  This warning appears
      # in several other locations in this file.
      value = self._fields.setdefault(field, value)
    return value
  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  def setter(self, new_value):
    # Submessages must be mutated in place; direct assignment is disallowed,
    # so this setter exists only to raise a helpful error message.
    raise AttributeError('Assignment not allowed to composite field '
                         '"%s" in protocol message object.' % proto_field_name)

  # Add a property to encapsulate the getter.
  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForExtensions(descriptor, cls):
  """Adds XXX_FIELD_NUMBER class constants for every known extension.

  Note: despite the name, no properties are added here -- extension values
  are accessed through the 'Extensions' dict, not through attributes.
  """
  extension_dict = descriptor.extensions_by_name
  for extension_name, extension_field in extension_dict.iteritems():
    constant_name = extension_name.upper() + "_FIELD_NUMBER"
    setattr(cls, constant_name, extension_field.number)
def _AddStaticMethods(cls):
  """Attaches the RegisterExtension and FromString static methods to cls."""
  # TODO(robinson): This probably needs to be thread-safe(?)
  def RegisterExtension(extension_handle):
    extension_handle.containing_type = cls.DESCRIPTOR
    _AttachFieldHelpers(cls, extension_handle)

    # Try to insert our extension, failing if an extension with the same
    # number already exists.
    actual_handle = cls._extensions_by_number.setdefault(
        extension_handle.number, extension_handle)
    if actual_handle is not extension_handle:
      raise AssertionError(
          'Extensions "%s" and "%s" both try to extend message type "%s" with '
          'field number %d.' %
          (extension_handle.full_name, actual_handle.full_name,
           cls.DESCRIPTOR.full_name, extension_handle.number))

    cls._extensions_by_name[extension_handle.full_name] = extension_handle

    if _IsMessageSetExtension(extension_handle):
      # MessageSet extension.  Also register under type name.
      cls._extensions_by_name[
          extension_handle.message_type.full_name] = extension_handle
  cls.RegisterExtension = staticmethod(RegisterExtension)

  def FromString(s):
    # Deserialize a brand-new instance from the given wire bytes.
    message = cls()
    message.MergeFromString(s)
    return message
  cls.FromString = staticmethod(FromString)
def _IsPresent(item):
  """Given a (FieldDescriptor, value) tuple from _fields, return true if the
  value should be included in the list returned by ListFields()."""
  field_descriptor, value = item
  if field_descriptor.label == _FieldDescriptor.LABEL_REPEATED:
    # Repeated fields count as present only when non-empty.
    return bool(value)
  if field_descriptor.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    # Submessages carry their own presence bit.
    return value._is_present_in_parent
  return True
def _AddListFieldsMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""
  def ListFields(self):
    # Return the present fields as (descriptor, value) pairs, ordered by
    # field number.
    return sorted(
        (item for item in self._fields.iteritems() if _IsPresent(item)),
        key=lambda item: item[0].number)
  cls.ListFields = ListFields
def _AddHasFieldMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""
  # Pre-compute the lookup table once; HasField() is not defined for repeated
  # fields, so they are deliberately excluded.
  singular_fields = {}
  for field in message_descriptor.fields:
    if field.label != _FieldDescriptor.LABEL_REPEATED:
      singular_fields[field.name] = field

  def HasField(self, field_name):
    field = singular_fields.get(field_name)
    if field is None:
      raise ValueError(
          'Protocol message has no singular "%s" field.' % field_name)
    if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
      # Submessages carry their own presence bit.
      value = self._fields.get(field)
      return value is not None and value._is_present_in_parent
    return field in self._fields
  cls.HasField = HasField
def _AddClearFieldMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ClearField(self, field_name):
try:
field = message_descriptor.fields_by_name[field_name]
except KeyError:
raise ValueError('Protocol message has no "%s" field.' % field_name)
if field in self._fields:
# Note: If the field is a sub-message, its listener will still point
# at us. That's fine, because the worst than can happen is that it
# will call _Modified() and invalidate our byte size. Big deal.
del self._fields[field]
# Always call _Modified() -- even if nothing was changed, this is
# a mutating method, and thus calling it should cause the field to become
# present in the parent message.
self._Modified()
cls.ClearField = ClearField
def _AddClearExtensionMethod(cls):
  """Helper for _AddMessageMethods()."""
  def ClearExtension(self, extension_handle):
    _VerifyExtensionHandle(self, extension_handle)

    # Similar to ClearField(), above: remove the value if set, then mark
    # the message modified either way.
    self._fields.pop(extension_handle, None)
    self._Modified()
  cls.ClearExtension = ClearExtension
def _AddClearMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def Clear(self):
# Clear fields.
self._fields = {}
self._Modified()
cls.Clear = Clear
def _AddHasExtensionMethod(cls):
  """Helper for _AddMessageMethods()."""
  def HasExtension(self, extension_handle):
    _VerifyExtensionHandle(self, extension_handle)
    if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
      # Presence is not defined for repeated extensions.
      raise KeyError('"%s" is repeated.' % extension_handle.full_name)

    if extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
      # Submessages carry their own presence bit.
      value = self._fields.get(extension_handle)
      return value is not None and value._is_present_in_parent
    return extension_handle in self._fields
  cls.HasExtension = HasExtension
def _AddEqualsMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""
  def __eq__(self, other):
    # Only messages of the same declared type can compare equal.
    if (isinstance(other, message_mod.Message) and
        other.DESCRIPTOR == self.DESCRIPTOR):
      if self is other:
        return True
      # Equal iff the same fields are set to equal values.
      return self.ListFields() == other.ListFields()
    return False
  cls.__eq__ = __eq__
def _AddStrMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""
  def __str__(self):
    # Delegate to the canonical text-format rendering of the message.
    return text_format.MessageToString(self)
  cls.__str__ = __str__
def _AddUnicodeMethod(unused_message_descriptor, cls):
  """Helper for _AddMessageMethods()."""
  def __unicode__(self):
    # Render as UTF-8 text format, then decode to a unicode string.
    return text_format.MessageToString(self, as_utf8=True).decode('utf-8')
  cls.__unicode__ = __unicode__
def _AddSetListenerMethod(cls):
"""Helper for _AddMessageMethods()."""
def SetListener(self, listener):
if listener is None:
self._listener = message_listener_mod.NullMessageListener()
else:
self._listener = listener
cls._SetListener = SetListener
def _BytesForNonRepeatedElement(value, field_number, field_type):
  """Returns the number of bytes needed to serialize a non-repeated element.
  The returned byte count includes space for tag information and any
  other additional space associated with serializing value.

  Args:
    value: Value we're serializing.
    field_number: Field number of this value.  (Since the field number
      is stored as part of a varint-encoded tag, this has an impact
      on the total bytes required to serialize the value).
    field_type: The type of the field.  One of the TYPE_* constants
      within FieldDescriptor.
  """
  sizer = type_checkers.TYPE_TO_BYTE_SIZE_FN.get(field_type)
  if sizer is None:
    raise message_mod.EncodeError('Unrecognized field type: %d' % field_type)
  return sizer(field_number, value)
def _AddByteSizeMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ByteSize(self):
if not self._cached_byte_size_dirty:
return self._cached_byte_size
size = 0
for field_descriptor, field_value in self.ListFields():
size += field_descriptor._sizer(field_value)
self._cached_byte_size = size
self._cached_byte_size_dirty = False
self._listener_for_children.dirty = False
return size
cls.ByteSize = ByteSize
def _AddSerializeToStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def SerializeToString(self):
# Check if the message has all of its required fields set.
errors = []
if not self.IsInitialized():
raise message_mod.EncodeError(
'Message is missing required fields: ' +
','.join(self.FindInitializationErrors()))
return self.SerializePartialToString()
cls.SerializeToString = SerializeToString
def _AddSerializePartialToStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def SerializePartialToString(self):
out = StringIO()
self._InternalSerialize(out.write)
return out.getvalue()
cls.SerializePartialToString = SerializePartialToString
def InternalSerialize(self, write_bytes):
for field_descriptor, field_value in self.ListFields():
field_descriptor._encoder(write_bytes, field_value)
cls._InternalSerialize = InternalSerialize
def _AddMergeFromStringMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods().

  Adds MergeFromString() (public wire-format parsing entry point) and the
  internal _InternalParse() driver used by field decoders.
  """
  def MergeFromString(self, serialized):
    length = len(serialized)
    try:
      if self._InternalParse(serialized, 0, length) != length:
        # The only reason _InternalParse would return early is if it
        # encountered an end-group tag.
        raise message_mod.DecodeError('Unexpected end-group tag.')
    except IndexError:
      # Decoders index past the end of the buffer on truncated input.
      raise message_mod.DecodeError('Truncated message.')
    except struct.error, e:
      # Malformed fixed-width values surface as struct unpack errors.
      raise message_mod.DecodeError(e)
    return length   # Return this for legacy reasons.
  cls.MergeFromString = MergeFromString

  # Bind frequently used callables into the closure once, so the parse loop
  # uses fast local/cell lookups instead of repeated attribute lookups.
  local_ReadTag = decoder.ReadTag
  local_SkipField = decoder.SkipField
  decoders_by_tag = cls._decoders_by_tag

  def InternalParse(self, buffer, pos, end):
    self._Modified()
    field_dict = self._fields
    while pos != end:
      (tag_bytes, new_pos) = local_ReadTag(buffer, pos)
      field_decoder = decoders_by_tag.get(tag_bytes)
      if field_decoder is None:
        # Unknown field: skip it; SkipField returns -1 for an end-group tag,
        # in which case we return early (before 'end') to signal the caller.
        new_pos = local_SkipField(buffer, new_pos, end, tag_bytes)
        if new_pos == -1:
          return pos
        pos = new_pos
      else:
        # Known field: the decoder consumes the value and stores it.
        pos = field_decoder(buffer, new_pos, end, self, field_dict)
    return pos
  cls._InternalParse = InternalParse
def _AddIsInitializedMethod(message_descriptor, cls):
  """Adds the IsInitialized and FindInitializationError methods to the
  protocol message class."""

  # Computed once per generated class, not per call.
  required_fields = [field for field in message_descriptor.fields
                     if field.label == _FieldDescriptor.LABEL_REQUIRED]

  def IsInitialized(self, errors=None):
    """Checks if all required fields of a message are set.

    Args:
      errors: A list which, if provided, will be populated with the field
        paths of all missing required fields.

    Returns:
      True iff the specified message has all required fields set.
    """
    # Performance is critical so we avoid HasField() and ListFields().

    # First pass: every required field must be set (and, for submessages,
    # marked present in this parent).
    for field in required_fields:
      if (field not in self._fields or
          (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and
           not self._fields[field]._is_present_in_parent)):
        if errors is not None:
          errors.extend(self.FindInitializationErrors())
        return False

    # Second pass: recurse into all set submessages (repeated or singular).
    for field, value in self._fields.iteritems():
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        if field.label == _FieldDescriptor.LABEL_REPEATED:
          for element in value:
            if not element.IsInitialized():
              if errors is not None:
                errors.extend(self.FindInitializationErrors())
              return False
        elif value._is_present_in_parent and not value.IsInitialized():
          if errors is not None:
            errors.extend(self.FindInitializationErrors())
          return False

    return True

  cls.IsInitialized = IsInitialized

  def FindInitializationErrors(self):
    """Finds required fields which are not initialized.

    Returns:
      A list of strings.  Each string is a path to an uninitialized field from
      the top-level message, e.g. "foo.bar[5].baz".
    """

    errors = []  # simplify things

    # Missing required fields at this level.
    for field in required_fields:
      if not self.HasField(field.name):
        errors.append(field.name)

    # Recurse into submessages, prefixing sub-errors with this field's path
    # component (extensions are rendered in parentheses).
    for field, value in self.ListFields():
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        if field.is_extension:
          name = "(%s)" % field.full_name
        else:
          name = field.name
        if field.label == _FieldDescriptor.LABEL_REPEATED:
          for i in xrange(len(value)):
            element = value[i]
            prefix = "%s[%d]." % (name, i)
            sub_errors = element.FindInitializationErrors()
            errors += [ prefix + error for error in sub_errors ]
        else:
          prefix = name + "."
          sub_errors = value.FindInitializationErrors()
          errors += [ prefix + error for error in sub_errors ]

    return errors

  cls.FindInitializationErrors = FindInitializationErrors
def _AddMergeFromMethod(cls):
  """Adds the MergeFrom() method (message-to-message merge) to cls."""
  # Hoisted to locals of the factory for faster lookups in the merge loop.
  LABEL_REPEATED = _FieldDescriptor.LABEL_REPEATED
  CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE

  def MergeFrom(self, msg):
    """Merges all set fields of msg into self, per protobuf merge semantics:
    repeated fields are appended, submessages merged, scalars overwritten."""
    if not isinstance(msg, cls):
      raise TypeError(
          "Parameter to MergeFrom() must be instance of same class.")

    assert msg is not self
    self._Modified()

    fields = self._fields

    for field, value in msg._fields.iteritems():
      if field.label == LABEL_REPEATED:
        field_value = fields.get(field)
        if field_value is None:
          # Construct a new object to represent this field.
          field_value = field._default_constructor(self)
          fields[field] = field_value
        field_value.MergeFrom(value)
      elif field.cpp_type == CPPTYPE_MESSAGE:
        # Only merge submessages that are actually present in the source.
        if value._is_present_in_parent:
          field_value = fields.get(field)
          if field_value is None:
            # Construct a new object to represent this field.
            field_value = field._default_constructor(self)
            fields[field] = field_value
          field_value.MergeFrom(value)
      else:
        # Scalar: the source value simply replaces ours.
        self._fields[field] = value
  cls.MergeFrom = MergeFrom
def _AddMessageMethods(message_descriptor, cls):
  """Adds implementations of all Message methods to cls."""
  _AddListFieldsMethod(message_descriptor, cls)
  _AddHasFieldMethod(message_descriptor, cls)
  _AddClearFieldMethod(message_descriptor, cls)
  # Extension accessors only make sense on extendable message types.
  if message_descriptor.is_extendable:
    _AddClearExtensionMethod(cls)
    _AddHasExtensionMethod(cls)
  _AddClearMethod(message_descriptor, cls)
  _AddEqualsMethod(message_descriptor, cls)
  _AddStrMethod(message_descriptor, cls)
  _AddUnicodeMethod(message_descriptor, cls)
  _AddSetListenerMethod(cls)
  _AddByteSizeMethod(message_descriptor, cls)
  _AddSerializeToStringMethod(message_descriptor, cls)
  _AddSerializePartialToStringMethod(message_descriptor, cls)
  _AddMergeFromStringMethod(message_descriptor, cls)
  _AddIsInitializedMethod(message_descriptor, cls)
  _AddMergeFromMethod(cls)
def _AddPrivateHelperMethods(cls):
"""Adds implementation of private helper methods to cls."""
def Modified(self):
"""Sets the _cached_byte_size_dirty bit to true,
and propagates this to our listener iff this was a state change.
"""
# Note: Some callers check _cached_byte_size_dirty before calling
# _Modified() as an extra optimization. So, if this method is ever
# changed such that it does stuff even when _cached_byte_size_dirty is
# already true, the callers need to be updated.
if not self._cached_byte_size_dirty:
self._cached_byte_size_dirty = True
self._listener_for_children.dirty = True
self._is_present_in_parent = True
self._listener.Modified()
cls._Modified = Modified
cls.SetInParent = Modified
class _Listener(object):
"""MessageListener implementation that a parent message registers with its
child message.
In order to support semantics like:
foo.bar.baz.qux = 23
assert foo.HasField('bar')
...child objects must have back references to their parents.
This helper class is at the heart of this support.
"""
def __init__(self, parent_message):
"""Args:
parent_message: The message whose _Modified() method we should call when
we receive Modified() messages.
"""
# This listener establishes a back reference from a child (contained) object
# to its parent (containing) object. We make this a weak reference to avoid
# creating cyclic garbage when the client finishes with the 'parent' object
# in the tree.
if isinstance(parent_message, weakref.ProxyType):
self._parent_message_weakref = parent_message
else:
self._parent_message_weakref = weakref.proxy(parent_message)
# As an optimization, we also indicate directly on the listener whether
# or not the parent message is dirty. This way we can avoid traversing
# up the tree in the common case.
self.dirty = False
def Modified(self):
if self.dirty:
return
try:
# Propagate the signal to our parents iff this is the first field set.
self._parent_message_weakref._Modified()
except ReferenceError:
# We can get here if a client has kept a reference to a child object,
# and is now setting a field on it, but the child's parent has been
# garbage-collected. This is not an error.
pass
# TODO(robinson): Move elsewhere? This file is getting pretty ridiculous...
# TODO(robinson): Unify error handling of "unknown extension" crap.
# TODO(robinson): Support iteritems()-style iteration over all
# extensions with the "has" bits turned on?
class _ExtensionDict(object):
"""Dict-like container for supporting an indexable "Extensions"
field on proto instances.
Note that in all cases we expect extension handles to be
FieldDescriptors.
"""
def __init__(self, extended_message):
"""extended_message: Message instance for which we are the Extensions dict.
"""
self._extended_message = extended_message
def __getitem__(self, extension_handle):
"""Returns the current value of the given extension handle."""
_VerifyExtensionHandle(self._extended_message, extension_handle)
result = self._extended_message._fields.get(extension_handle)
if result is not None:
return result
if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
result = extension_handle._default_constructor(self._extended_message)
elif extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
result = extension_handle.message_type._concrete_class()
try:
result._SetListener(self._extended_message._listener_for_children)
except ReferenceError:
pass
else:
# Singular scalar -- just return the default without inserting into the
# dict.
return extension_handle.default_value
# Atomically check if another thread has preempted us and, if not, swap
# in the new object we just created. If someone has preempted us, we
# take that object and discard ours.
# WARNING: We are relying on setdefault() being atomic. This is true
# in CPython but we haven't investigated others. This warning appears
# in several other locations in this file.
result = self._extended_message._fields.setdefault(
extension_handle, result)
return result
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
my_fields = self._extended_message.ListFields()
other_fields = other._extended_message.ListFields()
# Get rid of non-extension fields.
my_fields = [ field for field in my_fields if field.is_extension ]
other_fields = [ field for field in other_fields if field.is_extension ]
return my_fields == other_fields
def __ne__(self, other):
return not self == other
def __hash__(self):
raise TypeError('unhashable object')
# Note that this is only meaningful for non-repeated, scalar extension
# fields. Note also that we may have to call _Modified() when we do
# successfully set a field this way, to set any necssary "has" bits in the
# ancestors of the extended message.
def __setitem__(self, extension_handle, value):
"""If extension_handle specifies a non-repeated, scalar extension
field, sets the value of that field.
"""
_VerifyExtensionHandle(self._extended_message, extension_handle)
if (extension_handle.label == _FieldDescriptor.LABEL_REPEATED or
extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE):
raise TypeError(
'Cannot assign to extension "%s" because it is a repeated or '
'composite type.' % extension_handle.full_name)
# It's slightly wasteful to lookup the type checker each time,
# but we expect this to be a vanishingly uncommon case anyway.
type_checker = type_checkers.GetTypeChecker(
extension_handle.cpp_type, extension_handle.type)
type_checker.CheckValue(value)
self._extended_message._fields[extension_handle] = value
self._extended_message._Modified()
def _FindExtensionByName(self, name):
"""Tries to find a known extension with the specified name.
Args:
name: Extension full name.
Returns:
Extension field descriptor.
"""
return self._extended_message._extensions_by_name.get(name, None)
| bsd-3-clause |
OSSystems/glmark2 | waflib/Tools/qt4.py | 12 | 14141 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import sys
if sys.hexversion < 0x020400f0: from sets import Set as set
try:
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
except ImportError:
has_xml=False
ContentHandler=object
else:
has_xml=True
import os,sys
from waflib.Tools import c_preproc,cxx
from waflib import Task,Utils,Options,Errors
from waflib.TaskGen import feature,after_method,extension
from waflib.Configure import conf
from waflib import Logs
MOC_H=['.h','.hpp','.hxx','.hh']
EXT_RCC=['.qrc']
EXT_UI=['.ui']
EXT_QT4=['.cpp','.cc','.cxx','.C']
QT4_LIBS="QtCore QtGui QtUiTools QtNetwork QtOpenGL QtSql QtSvg QtTest QtXml QtXmlPatterns QtWebKit Qt3Support QtHelp QtScript QtDeclarative"
class qxx(cxx.cxx):
	"""
	C++ compilation task that also creates the Qt moc tasks on demand:
	the *.moc* files a source includes do not exist at scan time, so they
	are recorded by name and the matching moc tasks are created lazily in
	add_moc_tasks() once the prerequisite tasks have run.
	"""
	def __init__(self,*k,**kw):
		Task.Task.__init__(self,*k,**kw)
		# set to 1 once the moc tasks for this translation unit were created
		self.moc_done=0
	def scan(self):
		"""Runs the C preprocessor scan, moving *.moc* entries from the node
		list to the name list (they are generated later)."""
		(nodes,names)=c_preproc.scan(self)
		# NOTE(review): removing items from 'nodes' while iterating over it
		# skips the element following each removal; two consecutive .moc
		# entries would leave the second one in the node list -- consider
		# building a filtered list instead.
		for x in nodes:
			if x.name.endswith('.moc'):
				nodes.remove(x)
				names.append(x.path_from(self.inputs[0].parent.get_bld()))
		return(nodes,names)
	def runnable_status(self):
		"""Defers execution until predecessor tasks ran, then creates the moc
		tasks exactly once before deciding the usual runnable status."""
		if self.moc_done:
			return Task.Task.runnable_status(self)
		else:
			for t in self.run_after:
				if not t.hasrun:
					return Task.ASK_LATER
			self.add_moc_tasks()
			return Task.Task.runnable_status(self)
	def add_moc_tasks(self):
		"""Creates a moc task for every *.moc* dependency recorded for this
		task and inserts them at the front of the build queue."""
		node=self.inputs[0]
		bld=self.generator.bld
		try:
			# compute the signature once so that the raw dependencies are known
			self.signature()
		except KeyError:
			pass
		else:
			# the signature must be recomputed after the moc tasks have run
			delattr(self,'cache_sig')
		moctasks=[]
		mocfiles=[]
		try:
			tmp_lst=bld.raw_deps[self.uid()]
			bld.raw_deps[self.uid()]=[]
		except KeyError:
			tmp_lst=[]
		for d in tmp_lst:
			if not d.endswith('.moc'):
				continue
			if d in mocfiles:
				# the same .moc was recorded twice; this should not happen
				Logs.error("paranoia owns")
				continue
			mocfiles.append(d)
			# locate the header (or source) the .moc file is generated from
			h_node=None
			try:ext=Options.options.qt_header_ext.split()
			except AttributeError:pass
			if not ext:ext=MOC_H
			base2=d[:-4]
			for x in[node.parent]+self.generator.includes_nodes:
				for e in ext:
					h_node=x.find_node(base2+e)
					if h_node:
						break
				if h_node:
					m_node=h_node.change_ext('.moc')
					break
			else:
				# no header found: foo.cpp.moc is generated from foo.cpp itself
				for k in EXT_QT4:
					if base2.endswith(k):
						for x in[node.parent]+self.generator.includes_nodes:
							h_node=x.find_node(base2)
							if h_node:
								break
					if h_node:
						m_node=h_node.change_ext(k+'.moc')
						break
			if not h_node:
				raise Errors.WafError('no header found for %r which is a moc file'%d)
			bld.node_deps[(self.inputs[0].parent.abspath(),m_node.name)]=h_node
			# create the moc task and push it in front of the build queue
			task=Task.classes['moc'](env=self.env,generator=self.generator)
			task.set_inputs(h_node)
			task.set_outputs(m_node)
			gen=bld.producer
			gen.outstanding.insert(0,task)
			gen.total+=1
			moctasks.append(task)
		tmp_lst=bld.raw_deps[self.uid()]=mocfiles
		# also create moc tasks for the node dependencies recorded earlier
		lst=bld.node_deps.get(self.uid(),())
		for d in lst:
			name=d.name
			if name.endswith('.moc'):
				task=Task.classes['moc'](env=self.env,generator=self.generator)
				task.set_inputs(bld.node_deps[(self.inputs[0].parent.abspath(),name)])
				task.set_outputs(d)
				gen=bld.producer
				gen.outstanding.insert(0,task)
				gen.total+=1
				moctasks.append(task)
		# run the moc tasks before this compilation, and only do this once
		self.run_after.update(set(moctasks))
		self.moc_done=1
	# reuse the plain C++ run method for the actual compilation
	run=Task.classes['cxx'].__dict__['run']
class trans_update(Task.Task):
	"""Updates a *.ts* translation file from the sources via ``lupdate``."""
	run_str='${QT_LUPDATE} ${SRC} -ts ${TGT}'
	color='BLUE'
# register the task outputs with waf's output-update mechanism (the .ts
# targets are updated in place rather than rebuilt from scratch)
Task.update_outputs(trans_update)
class XMLHandler(ContentHandler):
	"""SAX handler collecting the <file> entries of a Qt resource (.qrc) document."""
	def __init__(self):
		# character data of the <file> element currently being read
		self.buf = []
		# file names collected so far
		self.files = []
	def startElement(self, name, attrs):
		if name == 'file':
			self.buf = []
	def endElement(self, name):
		if name != 'file':
			return
		self.files.append(str(''.join(self.buf)))
	def characters(self, data):
		self.buf.append(data)
def create_rcc_task(self, node):
	"""Creates the rcc task for a .qrc node plus the C++ task compiling its output.

	Returns the cxx compile task, which is also recorded in self.compiled_tasks.
	(Fix: removed the unused local 'rcctask' -- the rcc task is tracked by the
	build context, the local binding was dead code.)
	"""
	rcnode = node.change_ext('_rc.cpp')
	self.create_task('rcc', node, rcnode)
	cpptask = self.create_task('cxx', rcnode, rcnode.change_ext('.o'))
	try:
		self.compiled_tasks.append(cpptask)
	except AttributeError:
		# first compiled task for this task generator
		self.compiled_tasks = [cpptask]
	return cpptask
def create_uic_task(self, node):
	"""Creates a ui4 task turning a .ui file into the matching ui_*.h header."""
	uictask = self.create_task('ui4', node)
	# node.name[:-3] strips the '.ui' extension before applying ui_PATTERN
	header_name = self.env['ui_PATTERN'] % node.name[:-3]
	uictask.outputs = [self.path.find_or_declare(header_name)]
def add_lang(self, node):
	"""Appends a translation (.ts) node to the task generator's 'lang' list."""
	existing = self.to_list(getattr(self, 'lang', []))
	self.lang = existing + [node]
def apply_qt4(self):
	"""
	Task generator method: creates the translation tasks (.ts -> .qm, optional
	.qrc bundling) and computes MOC_FLAGS from the C++ flags.
	"""
	if getattr(self,'lang',None):
		# compile each .ts translation to a .qm file
		qmtasks=[]
		for x in self.to_list(self.lang):
			if isinstance(x,str):
				x=self.path.find_resource(x+'.ts')
			qmtasks.append(self.create_task('ts2qm',x,x.change_ext('.qm')))
		if getattr(self,'update',None)and Options.options.trans_qt4:
			# --translate given: refresh the .ts files from the sources and .ui files
			cxxnodes=[a.inputs[0]for a in self.compiled_tasks]+[a.inputs[0]for a in self.tasks if getattr(a,'inputs',None)and a.inputs[0].name.endswith('.ui')]
			for x in qmtasks:
				self.create_task('trans_update',cxxnodes,x.inputs)
		if getattr(self,'langname',None):
			# bundle the .qm files into a .qrc resource linked into the target
			qmnodes=[x.outputs[0]for x in qmtasks]
			rcnode=self.langname
			if isinstance(rcnode,str):
				rcnode=self.path.find_or_declare(rcnode+'.qrc')
			t=self.create_task('qm2rcc',qmnodes,rcnode)
			k=create_rcc_task(self,t.outputs[0])
			self.link_task.inputs.append(k.outputs[0])
	# pass only the -D/-I (or /D, /I, normalized to dashes) flags to moc
	lst=[]
	for flag in self.to_list(self.env['CXXFLAGS']):
		if len(flag)<2:continue
		f=flag[0:2]
		if f in['-D','-I','/D','/I']:
			if(f[0]=='/'):
				lst.append('-'+flag[1:])
			else:
				lst.append(flag)
	self.env['MOC_FLAGS']=lst
def cxx_hook(self, node):
	"""Compiles C++ sources with the moc-aware 'qxx' task instead of plain 'cxx'."""
	return self.create_compiled_task('qxx', node)
class rcc(Task.Task):
	"""
	Processes a Qt resource collection (*.qrc*) file with the rcc tool.
	The scanner parses the .qrc XML to record the referenced files as
	dependencies.
	"""
	color='BLUE'
	run_str='${QT_RCC} -name ${SRC[0].name} ${SRC[0].abspath()} ${RCC_ST} -o ${TGT}'
	ext_out=['.h']
	def scan(self):
		"""Parses the .qrc document and returns its (nodes, names) dependencies."""
		node=self.inputs[0]
		if not has_xml:
			Logs.error('no xml support was found, the rcc dependencies will be incomplete!')
			return([],[])
		parser=make_parser()
		curHandler=XMLHandler()
		parser.setContentHandler(curHandler)
		fi=open(self.inputs[0].abspath())
		# Fix: close the file even when the XML parse raises (the handle
		# used to leak on malformed .qrc input).
		try:
			parser.parse(fi)
		finally:
			fi.close()
		nodes=[]
		names=[]
		root=self.inputs[0].parent
		for x in curHandler.files:
			# resolve each referenced file; unresolved entries stay as names
			nd=root.find_resource(x)
			if nd:nodes.append(nd)
			else:names.append(x)
		return(nodes,names)
class moc(Task.Task):
	"""Creates *.moc* files from headers with the Qt meta-object compiler."""
	color='BLUE'
	run_str='${QT_MOC} ${MOC_FLAGS} ${MOCCPPPATH_ST:INCPATHS} ${MOCDEFINES_ST:DEFINES} ${SRC} ${MOC_ST} ${TGT}'
class ui4(Task.Task):
	"""Processes *.ui* designer files with uic to create the ui_*.h headers."""
	color='BLUE'
	run_str='${QT_UIC} ${SRC} -o ${TGT}'
	ext_out=['.h']
class ts2qm(Task.Task):
	"""Compiles *.ts* translation sources into binary *.qm* files with lrelease."""
	color='BLUE'
	run_str='${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}'
class qm2rcc(Task.Task):
	"""Generates a *.qrc* resource file listing the compiled *.qm* translations."""
	color='BLUE'
	after='ts2qm'
	def run(self):
		"""Writes the qrc XML with one <file> entry per input, relative to the output dir."""
		txt='\n'.join(['<file>%s</file>'%k.path_from(self.outputs[0].parent)for k in self.inputs])
		code='<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>'%txt
		self.outputs[0].write(code)
def configure(self):
	"""
	Detects the Qt4 tools (qmake, moc, uic, rcc, lrelease, lupdate) and the
	Qt4 libraries, then post-processes the library/rpath configuration.
	"""
	self.find_qt4_binaries()
	self.set_qt4_libs_to_check()
	self.find_qt4_libraries()
	self.add_qt4_rpath()
	self.simplify_qt4_libs()
def find_qt4_binaries(self):
	"""
	Locates qmake (preferring the highest reported QT_VERSION) and the other
	Qt4 tools, then seeds the uic/moc environment variables.  Candidate
	directories come from the --qtdir/--qtbin options, the QT4_ROOT
	environment variable, PATH and /usr/local/Trolltech.
	"""
	env=self.env
	opt=Options.options
	qtdir=getattr(opt,'qtdir','')
	qtbin=getattr(opt,'qtbin','')
	paths=[]
	if qtdir:
		qtbin=os.path.join(qtdir,'bin')
	# the qt directory can also be given through QT4_ROOT
	if not qtdir:
		qtdir=self.environ.get('QT4_ROOT','')
		qtbin=os.path.join(qtdir,'bin')
	if qtbin:
		paths=[qtbin]
	# no qtdir given: search PATH and the default Trolltech install locations
	if not qtdir:
		paths=os.environ.get('PATH','').split(os.pathsep)
		paths.append('/usr/share/qt4/bin/')
		try:
			lst=Utils.listdir('/usr/local/Trolltech/')
		except OSError:
			pass
		else:
			if lst:
				# keep the highest version found under /usr/local/Trolltech
				lst.sort()
				lst.reverse()
				qtdir='/usr/local/Trolltech/%s/'%lst[0]
				qtbin=os.path.join(qtdir,'bin')
				paths.append(qtbin)
	# among the qmake candidates, keep the one reporting the highest version
	cand=None
	prev_ver=['4','0','0']
	for qmk in['qmake-qt4','qmake4','qmake']:
		try:
			qmake=self.find_program(qmk,path_list=paths)
		except self.errors.ConfigurationError:
			pass
		else:
			try:
				version=self.cmd_and_log([qmake,'-query','QT_VERSION']).strip()
			except self.errors.ConfigurationError:
				pass
			else:
				if version:
					new_ver=version.split('.')
					if new_ver>prev_ver:
						cand=qmake
						prev_ver=new_ver
	if cand:
		self.env.QMAKE=cand
	else:
		self.fatal('Could not find qmake for qt4')
	# ask qmake where the Qt binaries live
	qtbin=self.cmd_and_log([self.env.QMAKE,'-query','QT_INSTALL_BINS']).strip()+os.sep
	def find_bin(lst,var):
		# store the first program of 'lst' found in 'paths' into env[var]
		for f in lst:
			try:
				ret=self.find_program(f,path_list=paths)
			except self.errors.ConfigurationError:
				pass
			else:
				env[var]=ret
				break
	find_bin(['uic-qt3','uic3'],'QT_UIC3')
	find_bin(['uic-qt4','uic'],'QT_UIC')
	if not env['QT_UIC']:
		self.fatal('cannot find the uic compiler for qt4')
	# make sure the uic found is the qt4 one, not qt3
	try:
		uicver=self.cmd_and_log(env['QT_UIC']+" -version 2>&1").strip()
	except self.errors.ConfigurationError:
		self.fatal('this uic compiler is for qt3, add uic for qt4 to your path')
	uicver=uicver.replace('Qt User Interface Compiler ','').replace('User Interface Compiler for Qt','')
	self.msg('Checking for uic version','%s'%uicver)
	if uicver.find(' 3.')!=-1:
		self.fatal('this uic compiler is for qt3, add uic for qt4 to your path')
	find_bin(['moc-qt4','moc'],'QT_MOC')
	find_bin(['rcc'],'QT_RCC')
	find_bin(['lrelease-qt4','lrelease'],'QT_LRELEASE')
	find_bin(['lupdate-qt4','lupdate'],'QT_LUPDATE')
	# command-line templates for the tools detected above
	env['UIC3_ST']='%s -o %s'
	env['UIC_ST']='%s -o %s'
	env['MOC_ST']='-o'
	env['ui_PATTERN']='ui_%s.h'
	env['QT_LRELEASE_FLAGS']=['-silent']
	env.MOCCPPPATH_ST='-I%s'
	env.MOCDEFINES_ST='-D%s'
def find_qt4_libraries(self):
    """Locate the Qt4 libraries and record LIB_/LIBPATH_/INCLUDES_ uselib variables.

    Prefers pkg-config when available (try/except/else below); otherwise falls
    back to a manual filesystem search with per-platform naming: macOS
    frameworks, Unix lib<name>.so/.a, Windows .lib (release and _debug
    variants).
    """
    qtlibs=getattr(Options.options,'qtlibs','')
    if not qtlibs:
        try:
            qtlibs=self.cmd_and_log([self.env.QMAKE,'-query','QT_INSTALL_LIBS']).strip()
        except Errors.WafError:
            qtdir=self.cmd_and_log([self.env.QMAKE,'-query','QT_INSTALL_PREFIX']).strip()+os.sep
            qtlibs=os.path.join(qtdir,'lib')
    self.msg('Found the Qt4 libraries in',qtlibs)
    qtincludes=self.cmd_and_log([self.env.QMAKE,'-query','QT_INSTALL_HEADERS']).strip()
    env=self.env
    if not 'PKG_CONFIG_PATH' in os.environ:
        os.environ['PKG_CONFIG_PATH']='%s:%s/pkgconfig:/usr/lib/qt4/lib/pkgconfig:/opt/qt4/lib/pkgconfig:/usr/lib/qt4/lib:/opt/qt4/lib'%(qtlibs,qtlibs)
    try:
        self.check_cfg(atleast_pkgconfig_version='0.1')
    except self.errors.ConfigurationError:
        # pkg-config unavailable: probe the filesystem for every Qt module.
        for i in self.qt4_vars:
            uselib=i.upper()
            if Utils.unversioned_sys_platform()=="darwin":
                # macOS: Qt modules ship as frameworks.
                frameworkName=i+".framework"
                qtDynamicLib=os.path.join(qtlibs,frameworkName,i)
                if os.path.exists(qtDynamicLib):
                    env.append_unique('FRAMEWORK_'+uselib,i)
                    self.msg('Checking for %s'%i,qtDynamicLib,'GREEN')
                else:
                    self.msg('Checking for %s'%i,False,'YELLOW')
                env.append_unique('INCLUDES_'+uselib,os.path.join(qtlibs,frameworkName,'Headers'))
            elif sys.platform!="win32":
                # Unix: prefer the shared library, fall back to the static one.
                qtDynamicLib=os.path.join(qtlibs,"lib"+i+".so")
                qtStaticLib=os.path.join(qtlibs,"lib"+i+".a")
                if os.path.exists(qtDynamicLib):
                    env.append_unique('LIB_'+uselib,i)
                    self.msg('Checking for %s'%i,qtDynamicLib,'GREEN')
                elif os.path.exists(qtStaticLib):
                    env.append_unique('LIB_'+uselib,i)
                    self.msg('Checking for %s'%i,qtStaticLib,'GREEN')
                else:
                    self.msg('Checking for %s'%i,False,'YELLOW')
                env.append_unique('LIBPATH_'+uselib,qtlibs)
                env.append_unique('INCLUDES_'+uselib,qtincludes)
                env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i))
            else:
                # Windows: try the possible import-library names; for-else runs
                # the YELLOW message only when no candidate matched.
                for k in("lib%s.a","lib%s4.a","%s.lib","%s4.lib"):
                    lib=os.path.join(qtlibs,k%i)
                    if os.path.exists(lib):
                        # Keep any version suffix ("4") from the file name.
                        env.append_unique('LIB_'+uselib,i+k[k.find("%s")+2:k.find('.')])
                        self.msg('Checking for %s'%i,lib,'GREEN')
                        break
                else:
                    self.msg('Checking for %s'%i,False,'YELLOW')
                env.append_unique('LIBPATH_'+uselib,qtlibs)
                env.append_unique('INCLUDES_'+uselib,qtincludes)
                env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i))
                # Same search for the debug ("d"-suffixed) variant.
                uselib=i.upper()+"_debug"
                for k in("lib%sd.a","lib%sd4.a","%sd.lib","%sd4.lib"):
                    lib=os.path.join(qtlibs,k%i)
                    if os.path.exists(lib):
                        env.append_unique('LIB_'+uselib,i+k[k.find("%s")+2:k.find('.')])
                        self.msg('Checking for %s'%i,lib,'GREEN')
                        break
                else:
                    self.msg('Checking for %s'%i,False,'YELLOW')
                env.append_unique('LIBPATH_'+uselib,qtlibs)
                env.append_unique('INCLUDES_'+uselib,qtincludes)
                env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i))
    else:
        # pkg-config works: let it fill in the uselib variables.
        for i in self.qt4_vars_debug+self.qt4_vars:
            self.check_cfg(package=i,args='--cflags --libs',mandatory=False)
def simplify_qt4_libs(self):
    """Drop library paths already provided by QtCore from the other Qt modules.

    Every Qt module links against QtCore, so LIBPATH entries duplicated from
    LIBPATH_QTCORE are redundant and removed in place from self.env.
    """
    env = self.env
    def _strip_core_paths(names, core_key):
        for name in names:
            uselib = name.upper()
            if uselib == 'QTCORE':
                # QtCore keeps its own paths untouched.
                continue
            paths = env['LIBPATH_' + uselib]
            if paths:
                core_paths = env[core_key]
                env['LIBPATH_' + uselib] = [p for p in paths if p not in core_paths]
    _strip_core_paths(self.qt4_vars, 'LIBPATH_QTCORE')
    _strip_core_paths(self.qt4_vars_debug, 'LIBPATH_QTCORE_DEBUG')
def add_qt4_rpath(self):
    """When --want-rpath was given, derive RPATH_* flags from the LIBPATH_* values.

    Paths shared with QtCore are skipped for every module except QtCore
    itself, mirroring the de-duplication done by simplify_qt4_libs.
    """
    env = self.env
    if Options.options.want_rpath:
        def _collect_rpath(names, core_key):
            for name in names:
                uselib = name.upper()
                paths = env['LIBPATH_' + uselib]
                if paths:
                    core_paths = env[core_key]
                    flags = []
                    for p in paths:
                        # Non-core modules omit paths QtCore already covers.
                        if uselib != 'QTCORE' and p in core_paths:
                            continue
                        flags.append('-Wl,--rpath=' + p)
                    env['RPATH_' + uselib] = flags
        _collect_rpath(self.qt4_vars, 'LIBPATH_QTCORE')
        _collect_rpath(self.qt4_vars_debug, 'LIBPATH_QTCORE_DEBUG')
def set_qt4_libs_to_check(self):
    """Initialize self.qt4_vars / self.qt4_vars_debug with the Qt modules to probe.

    Existing attributes are preserved (only normalized to lists); otherwise
    the defaults come from QT4_LIBS, with "_debug" counterparts derived from
    the release names.
    """
    if not hasattr(self, 'qt4_vars'):
        self.qt4_vars = QT4_LIBS
    self.qt4_vars = Utils.to_list(self.qt4_vars)
    if not hasattr(self, 'qt4_vars_debug'):
        self.qt4_vars_debug = ['%s_debug' % name for name in self.qt4_vars]
    self.qt4_vars_debug = Utils.to_list(self.qt4_vars_debug)
def options(opt):
    """Register the qt4 tool's command-line options on the given option parser."""
    opt.add_option('--want-rpath', action='store_true', default=False, dest='want_rpath', help='enable the rpath for qt libraries')
    opt.add_option('--header-ext', type='string', default='', help='header extension for moc files', dest='qt_header_ext')
    # Plain string options pointing at a Qt installation.
    for flag in ('qtdir', 'qtbin', 'qtlibs'):
        opt.add_option('--' + flag, type='string', default='', dest=flag)
    opt.add_option('--translate', action="store_true", help="collect translation strings", dest="trans_qt4", default=False)
extension(*EXT_RCC)(create_rcc_task)
extension(*EXT_UI)(create_uic_task)
extension('.ts')(add_lang)
feature('qt4')(apply_qt4)
after_method('apply_link')(apply_qt4)
extension(*EXT_QT4)(cxx_hook)
conf(find_qt4_binaries)
conf(find_qt4_libraries)
conf(simplify_qt4_libs)
conf(add_qt4_rpath)
conf(set_qt4_libs_to_check) | gpl-3.0 |
namlook/mongokit | mongokit/schema_document.py | 1 | 42677 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2011, Nicolas Clairon
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import bson
import datetime
import logging
from copy import deepcopy
log = logging.getLogger(__name__)
from mongokit.operators import SchemaOperator, IS
from mongokit.helpers import DotCollapsedDict
from mongokit.helpers import DotExpandedDict
from mongokit.helpers import i18nDotedDict
from mongokit.helpers import DotedDict
# Public API of this module, re-exported by ``from mongokit.schema_document import *``.
__all__ = [
    'AuthorizedTypeError',
    'BadKeyError',
    'CustomType',
    'DefaultFieldTypeError',
    'DotCollapsedDict',
    'DotedDict',
    'DotExpandedDict',
    'DuplicateDefaultValueError',
    'DuplicateRequiredError',
    'i18n',
    'i18nError',
    'ModifierOperatorError',
    'RequireFieldError',
    'SchemaDocument',
    'SchemaDocumentError',
    'SchemaProperties',
    'SchemaTypeError',
    'Set',
    'StructureError',
    'ValidationError',
]
class CustomType(object):
    """Base class for user-defined field types.

    Subclasses must set ``mongo_type`` (type stored in MongoDB) and
    ``python_type`` (type exposed to Python code), and implement the two
    conversion hooks. ``init_type``, when set, is used to build the field's
    skeleton value.
    """
    init_type = None
    mongo_type = None
    python_type = None

    def __init__(self):
        # Both type descriptors are mandatory on concrete subclasses.
        cls_name = self.__class__.__name__
        if self.mongo_type is None:
            raise TypeError("`mongo_type` property must be specify in %s" % cls_name)
        if self.python_type is None:
            raise TypeError("`python_type` property must be specify in %s" % cls_name)

    def to_bson(self, value):
        """Convert *value* from its Python form to the MongoDB representation."""
        raise NotImplementedError

    def to_python(self, value):
        """Convert *value* from its MongoDB representation to the Python form."""
        raise NotImplementedError

    def validate(self, value, path):
        """Optional extra validation hook called from Document.validate().

        value: the value of the field
        path: the field name (ie, 'foo' or 'foo.bar' if nested)
        """
        pass
# Fields which do not need to be declared in the structure.
STRUCTURE_KEYWORDS = []

class SchemaDocumentError(Exception):
    """Base class for all schema/document related errors in this module."""
    pass

class RequireFieldError(SchemaDocumentError):
    """A field listed in `required_fields` is missing or empty."""
    pass

class StructureError(SchemaDocumentError):
    """The document does not match the declared structure."""
    pass

class BadKeyError(SchemaDocumentError):
    """A structure key is malformed (contains '.' or starts with '$')."""
    pass

class AuthorizedTypeError(SchemaDocumentError):
    """A value or declared type is not in `authorized_types`."""
    pass

class ValidationError(SchemaDocumentError):
    """A field value failed one of the configured validators."""
    pass

class DuplicateRequiredError(SchemaDocumentError):
    """`required_fields` contains duplicate entries."""
    pass

class DuplicateDefaultValueError(SchemaDocumentError):
    """A default value was declared more than once."""
    pass

class ModifierOperatorError(SchemaDocumentError):
    """A modifier operator was used incorrectly."""
    pass

class SchemaTypeError(SchemaDocumentError):
    """A document value has a type other than the one declared in the structure."""
    pass

class DefaultFieldTypeError(SchemaDocumentError):
    """A default value's type does not match the field's declared type."""
    pass

class i18nError(SchemaDocumentError):
    """An i18n declaration is inconsistent with the structure."""
    pass

class DeprecationError(Exception):
    """A deprecated construct (e.g. a type used as structure key) was used."""
    pass

class DuplicateI18nError(Exception):
    """The `i18n` descriptor contains duplicate entries."""
    pass
class SchemaProperties(type):
    """Metaclass of SchemaDocument.

    At class-creation time it merges the schema descriptors (structure,
    required_fields, default_values, validators, i18n, authorized_types)
    inherited from the base classes into the new class, validates the
    resulting structure and descriptors, and precomputes helper attributes
    (_namespaces, _required_namespace, _collapsed_struct, _i18n_namespace).
    """
    def __new__(mcs, name, bases, attrs):
        attrs['_protected_field_names'] = set(
            ['_protected_field_names', '_namespaces', '_required_namespace'])
        # Merge every parent's descriptors into the class being created.
        for base in bases:
            parent = base.__mro__[0]
            if not hasattr(parent, 'structure'):
                continue
            if parent.structure is not None:
                #parent = parent()
                if parent.structure:
                    if 'structure' not in attrs and parent.structure:
                        attrs['structure'] = parent.structure.copy()
                    else:
                        # Child keys win over inherited ones.
                        obj_structure = attrs.get('structure', {}).copy()
                        attrs['structure'] = parent.structure.copy()
                        attrs['structure'].update(obj_structure)
                if parent.required_fields:
                    attrs['required_fields'] = list(set(
                        attrs.get('required_fields', [])+parent.required_fields))
                if parent.default_values:
                    obj_default_values = attrs.get('default_values', {}).copy()
                    attrs['default_values'] = parent.default_values.copy()
                    attrs['default_values'].update(obj_default_values)
                if parent.validators:
                    obj_validators = attrs.get('validators', {}).copy()
                    attrs['validators'] = parent.validators.copy()
                    attrs['validators'].update(obj_validators)
                if parent.i18n:
                    attrs['i18n'] = list(set(
                        attrs.get('i18n', [])+parent.i18n))
            if attrs.get('authorized_types'):
                attrs['authorized_types'] = list(set(parent.authorized_types).union(set(attrs['authorized_types'])))
        # Attribute names inherited anywhere in the MRO are off-limits for
        # dot-notation field access.
        for mro in bases[0].__mro__:
            attrs['_protected_field_names'] = attrs['_protected_field_names'].union(list(mro.__dict__))
        attrs['_protected_field_names'] = list(attrs['_protected_field_names'])
        if attrs.get('structure') and name not in \
          ["SchemaDocument", "Document", "VersionedDocument", "RevisionDocument"]:
            base = bases[0]
            if not attrs.get('authorized_types'):
                attrs['authorized_types'] = base.authorized_types
            base._validate_structure(attrs['structure'], name, attrs.get('authorized_types'))
            # Flat dotted paths of every field in the structure.
            attrs['_namespaces'] = list(base._SchemaDocument__walk_dict(attrs['structure']))
            if [1 for i in attrs['_namespaces'] if type(i) is type]:
                raise DeprecationError("%s: types are not allowed as structure key anymore" % name)
            mcs._validate_descriptors(attrs)
            ## building required fields namespace
            attrs['_required_namespace'] = set([])
            for rf in attrs.get('required_fields', []):
                splited_rf = rf.split('.')
                for index in range(len(splited_rf)):
                    # Add every prefix of the dotted path ("a", "a.b", ...).
                    attrs['_required_namespace'].add(".".join(splited_rf[:index+1]))
            attrs['_collapsed_struct'] = DotCollapsedDict(attrs['structure'], remove_under_type=True)
        elif attrs.get('structure') is not None and name not in \
          ["SchemaDocument", "Document", "VersionedDocument", "RevisionDocument"]:
            attrs['_collapsed_struct'] = {}
        attrs['_i18n_namespace'] = []
        if attrs.get('i18n'):
            # Parent paths of every translated field.
            attrs['_i18n_namespace'] = set(['.'.join(i.split('.')[:-1]) for i in attrs['i18n']])
        return type.__new__(mcs, name, bases, attrs)

    @classmethod
    def _validate_descriptors(mcs, attrs):
        """Check that every descriptor entry refers to a path that exists in the structure."""
        # TODO i18n validator
        for dv in attrs.get('default_values', {}):
            if not dv in attrs['_namespaces']:
                raise ValueError("Error in default_values: can't find %s in structure" % dv)
        for required in attrs.get('required_fields', []):
            if required not in attrs['_namespaces']:
                raise ValueError("Error in required_fields: can't find %s in structure" % required)
        for validator in attrs.get('validators', {}):
            if validator not in attrs['_namespaces']:
                raise ValueError("Error in validators: can't find %s in structure" % validator)
        # required_field
        if attrs.get('required_fields'):
            if len(attrs['required_fields']) != len(set(attrs['required_fields'])):
                raise DuplicateRequiredError("duplicate required_fields : %s" % attrs['required_fields'])
        # i18n
        if attrs.get('i18n'):
            if len(attrs['i18n']) != len(set(attrs['i18n'])):
                raise DuplicateI18nError("duplicated i18n : %s" % attrs['i18n'])
            for _i18n in attrs['i18n']:
                if _i18n not in attrs['_namespaces']:
                    raise ValueError("Error in i18n: can't find {} in structure".format(_i18n))
class SchemaDocument(dict):
    """
    A SchemaDocument is dictionary with a building structured schema
    The validate method will check that the document match the underling
    structure. A structure must be specify in each SchemaDocument.

    >>> class TestDoc(SchemaDocument):
    ...     structure = {
    ...         "foo":unicode,
    ...         "bar":int,
    ...         "nested":{
    ...             "bla":float}}

    `unicode`, `int`, `float` are python types listed in `mongokit.authorized_types`.

    >>> doc = TestDoc()
    >>> doc
    {'foo': None, 'bar': None, 'nested': {'bla': None}}

    A SchemaDocument works just like dict:

    >>> doc['bar'] = 3
    >>> doc['foo'] = "test"

    We can describe fields as required with the required attribute:

    >>> TestDoc.required_fields = ['bar', 'nested.bla']
    >>> doc = TestDoc()
    >>> doc['bar'] = 2

    Validation is made with the `validate()` method:

    >>> doc.validate()  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    Traceback (most recent call last):
    ...
    RequireFieldError: nested.bla is required

    Default values can be set by using the attribute default_values :

    >>> TestDoc.default_values = {"bar":3, "nested.bla":2.0}
    >>> doc = TestDoc()
    >>> doc
    {'foo': None, 'bar': 3, 'nested': {'bla': 2.0}}
    >>> doc.validate()

    Validators can be added in order to validate some values :

    >>> TestDoc.validators = {"bar":lambda x: x>0, "nested.bla": lambda x: x<0}
    >>> doc = TestDoc()
    >>> doc['bar'] = 3
    >>> doc['nested']['bla'] = 2.0
    >>> doc.validate()
    Traceback (most recent call last):
    ...
    ValidationError: nested.bla does not pass the validator <lambda>

    If you want to use the dot notation (ala json), you must set the
    `use_dot_notation` attribute to True:

    >>> class TestDotNotation(SchemaDocument):
    ...     structure = {
    ...         "foo":{ "bar":unicode}
    ...     }
    ...     use_dot_notation=True

    >>> doc = TestDotNotation()
    >>> doc.foo.bar = u"bla"
    >>> doc
    {"foo": {"bar": u"bla"}}
    """
    __metaclass__ = SchemaProperties
    # -- schema descriptors, merged across subclasses by SchemaProperties --
    structure = None
    required_fields = []
    default_values = {}
    validators = {}
    i18n = []
    # When False, validation errors are collected in self.validation_errors
    # instead of being raised.
    raise_validation_errors = True
    skip_validation = False
    # if you want to have all schemaless benefits (default False but should change)
    # warning, if use_schemaless is True, Migration features can not be used.
    use_schemaless = False
    # If you want to use the dot notation, set this to True:
    use_dot_notation = False
    dot_notation_warning = False
    # Python types a structure may declare for its fields.
    authorized_types = [
        type(None),
        bool,
        int,
        long,
        float,
        unicode,
        basestring,
        list,
        dict,
        datetime.datetime,
        bson.binary.Binary,
        CustomType,
    ]
def __init__(self, doc=None, gen_skel=True, _gen_auth_types=True, _validate=True, lang='en', fallback_lang='en'):
    """
    doc : a dictionary
    gen_skel : if True, generate automatically the skeleton of the doc
        filled with NoneType each time validate() is called. Note that
        if doc is not {}, gen_skel is always False. If gen_skel is False,
        default_values cannot be filled.
    gen_auth_types: if True, generate automatically the self.authorized_types
        attribute from self.authorized_types
    lang / fallback_lang: current and fallback language codes used by the
        i18n dot-notation accessors.
    """
    super(SchemaDocument, self).__init__()
    if self.structure is None:
        self.structure = {}
    self._current_lang = lang
    self._fallback_lang = fallback_lang
    self.validation_errors = {}
    # init
    if doc:
        # Feeding an existing document disables skeleton generation.
        for k, v in doc.iteritems():
            self[k] = v
        gen_skel = False
    if gen_skel:
        self.generate_skeleton()
        if self.default_values:
            self._set_default_fields(self, self.structure)
    else:
        # Existing values may come from MongoDB: convert CustomType fields
        # to their Python representation.
        self._process_custom_type('python', self, self.structure)
    if self.use_dot_notation:
        self.__generate_doted_dict(self, self.structure)
    if self.i18n:
        self._make_i18n()
def generate_skeleton(self):
    """
    validate and generate the skeleton of the document
    from the structure (unknown values are set to None)
    """
    self.__generate_skeleton(self, self.structure)
def validate(self):
    """
    validate the document.

    This method will verify if :
      * the doc follow the structure,
      * all required fields are filled

    Additionally, this method will process all
    validators.
    """
    if self.validators:
        self._process_validators(self, self.structure)
    # Type checks run against the BSON representation, then values are
    # converted back to their Python form.
    self._process_custom_type('bson', self, self.structure)
    self._validate_doc(self, self.structure)
    self._process_custom_type('python', self, self.structure)
    if self.required_fields:
        self._validate_required(self, self.structure)
def __setattr__(self, key, value):
    """Route attribute writes to document keys when dot notation is enabled.

    Keys present in the document (and not shadowed by a class attribute)
    are written as items; i18n fields are written under the current
    language. Everything else falls back to a plain attribute write.
    """
    if key not in self._protected_field_names and self.use_dot_notation and key in self:
        if isinstance(self.structure[key], i18n):
            self[key][self._current_lang] = value
        else:
            self[key] = value
    else:
        if self.dot_notation_warning and not key.startswith('_') and key not in \
          ['db', 'collection', 'versioning_collection', 'connection', 'fs']:
            log.warning("dot notation: {} was not found in structure. Add it as attribute instead".format(key))
        dict.__setattr__(self, key, value)
def __getattr__(self, key):
    """Route attribute reads to document keys when dot notation is enabled.

    i18n fields return the value for the current language, falling back to
    `_fallback_lang` when no translation exists for it.
    """
    if key not in self._protected_field_names and self.use_dot_notation and key in self:
        if isinstance(self[key], i18n):
            if self._current_lang not in self[key]:
                return self[key].get(self._fallback_lang)
            return self[key][self._current_lang]
        return self[key]
    else:
        return dict.__getattribute__(self, key)
#
# Public API end
#
@classmethod
def __walk_dict(cls, dic):
    """Yield the dotted path of every field in a (possibly nested) structure.

    Type keys (e.g. {unicode: int}) are rendered as '$typename'. Used by
    the metaclass to build the class's `_namespaces` list.
    """
    # thanks jean_b for the patch
    for key, value in dic.items():
        if isinstance(value, dict) and len(value):
            if type(key) is type:
                yield '$%s' % key.__name__
            else:
                yield key
            for child_key in cls.__walk_dict(value):
                if type(key) is type:
                    new_key = "$%s" % key.__name__
                else:
                    new_key = key
                #if type(child_key) is type:
                #  new_child_key = "$%s" % child_key.__name__
                #else:
                if type(child_key) is not type:
                    new_child_key = child_key
                yield '%s.%s' % (new_key, new_child_key)
        elif type(key) is type:
            yield '$%s' % key.__name__
        # elif isinstance(value, list) and len(value):
        #     if isinstance(value[0], dict):
        #         for child_key in cls.__walk_dict(value[0]):
        #             #if type(key) is type:
        #             #   new_key = "$%s" % key.__name__
        #             #else:
        #             if type(key) is not type:
        #                 new_key = key
        #             #if type(child_key) is type:
        #             #   new_child_key = "$%s" % child_key.__name__
        #             #else:
        #             if type(child_key) is not type:
        #                 new_child_key = child_key
        #             yield '%s.%s' % (new_key, new_child_key)
        #     else:
        #         if type(key) is not type:
        #             yield key
        #         #else:
        #         #   yield ""
        else:
            if type(key) is not type:
                yield key
            #else:
            #   yield ""
@classmethod
def _validate_structure(cls, structure, name, authorized_types):
    """
    validate if all fields in self.structure are in authorized types.
    """
    ##############
    def __validate_structure(struct, name, _authorized):
        # NOTE(review): the `_authorized` parameter is ignored; the closure
        # uses the outer `authorized_types` instead.
        if type(struct) is type:
            if struct not in authorized_types:
                # NOTE(review): this membership test is duplicated.
                if struct not in authorized_types:
                    raise StructureError("%s: %s is not an authorized type" % (name, struct))
        elif isinstance(struct, dict):
            for key in struct:
                # Keys must be strings (no '.' and no leading '$') or
                # authorized types.
                if isinstance(key, basestring):
                    if "." in key:
                        raise BadKeyError("%s: %s must not contain '.'" % (name, key))
                    if key.startswith('$'):
                        raise BadKeyError("%s: %s must not start with '$'" % (name, key))
                elif type(key) is type:
                    if not key in authorized_types:
                        raise AuthorizedTypeError("%s: %s is not an authorized type" % (name, key))
                else:
                    raise StructureError("%s: %s must be a basestring or a type" % (name, key))
                if struct[key] is None:
                    pass
                elif isinstance(struct[key], dict):
                    __validate_structure(struct[key], name, authorized_types)
                elif isinstance(struct[key], list):
                    __validate_structure(struct[key], name, authorized_types)
                elif isinstance(struct[key], tuple):
                    __validate_structure(struct[key], name, authorized_types)
                elif isinstance(struct[key], CustomType):
                    __validate_structure(struct[key].mongo_type, name, authorized_types)
                elif isinstance(struct[key], SchemaProperties):
                    pass
                elif isinstance(struct[key], SchemaOperator):
                    __validate_structure(struct[key], name, authorized_types)
                elif hasattr(struct[key], 'structure'):
                    __validate_structure(struct[key], name, authorized_types)
                elif struct[key] not in authorized_types:
                    # Last resort: accept instances or subclasses of any
                    # authorized type.
                    ok = False
                    for auth_type in authorized_types:
                        if struct[key] is None:
                            ok = True
                        else:
                            try:
                                if isinstance(struct[key], auth_type) or issubclass(struct[key], auth_type):
                                    ok = True
                            except TypeError:
                                raise TypeError("%s: %s is not a type" % (name, struct[key]))
                    if not ok:
                        raise StructureError(
                            "%s: %s is not an authorized type" % (name, struct[key]))
        elif isinstance(struct, list) or isinstance(struct, tuple):
            for item in struct:
                __validate_structure(item, name, authorized_types)
        elif isinstance(struct, SchemaOperator):
            if isinstance(struct, IS):
                # IS() holds concrete values: check their types.
                for operand in struct:
                    if type(operand) not in authorized_types:
                        raise StructureError("%s: %s in %s is not an authorized type (%s found)" % (
                            name, operand, struct, type(operand).__name__))
            else:
                # Other operators hold types themselves.
                for operand in struct:
                    if operand not in authorized_types:
                        raise StructureError("%s: %s in %s is not an authorized type (%s found)" % (
                            name, operand, struct, type(operand).__name__))
        elif isinstance(struct, SchemaProperties):
            pass
        else:
            ok = False
            for auth_type in authorized_types:
                if isinstance(struct, auth_type):
                    ok = True
            if not ok:
                raise StructureError("%s: %s is not an authorized_types" % (name, struct))
    #################
    if structure is None:
        raise StructureError("%s.structure must not be None" % name)
    if not isinstance(structure, dict):
        raise StructureError("%s.structure must be a dict instance" % name)
    __validate_structure(structure, name, authorized_types)
def _raise_exception(self, exception, field, message):
if self.raise_validation_errors:
raise exception(message)
else:
if not field in self.validation_errors:
self.validation_errors[field] = []
self.validation_errors[field].append(exception(message))
def _validate_doc(self, doc, struct, path=""):
    """
    check if doc field types match the doc field structure
    """
    if type(struct) is type or struct is None:
        if struct is None:
            # Untyped field: any authorized type is accepted.
            if type(doc) not in self.authorized_types:
                self._raise_exception(AuthorizedTypeError, type(doc).__name__,
                    "%s is not an authorized types" % type(doc).__name__)
        elif not isinstance(doc, struct) and doc is not None:
            self._raise_exception(SchemaTypeError, path,
                "%s must be an instance of %s not %s" % (
                    path, struct.__name__, type(doc).__name__))
    elif isinstance(struct, CustomType):
        # Values are in BSON form at this point (see validate()).
        if not isinstance(doc, struct.mongo_type) and doc is not None:
            self._raise_exception(SchemaTypeError, path,
                "%s must be an instance of %s not %s" % (
                    path, struct.mongo_type.__name__, type(doc).__name__))
        struct.validate(doc, path=path)
    elif isinstance(struct, SchemaOperator):
        if not struct.validate(doc) and doc is not None:
            if isinstance(struct, IS):
                self._raise_exception(SchemaTypeError, path,
                    "%s must be in %s not %s" % (path, struct._operands, doc))
            else:
                self._raise_exception(SchemaTypeError, path,
                    "%s must be an instance of %s not %s" % (path, struct, type(doc).__name__))
    elif isinstance(struct, dict):
        if not isinstance(doc, type(struct)):
            self._raise_exception(SchemaTypeError, path,
                "%s must be an instance of %s not %s" % (
                    path, type(struct).__name__, type(doc).__name__))
        # '_id' may be absent from the doc, so it does not count.
        struct_length = len(struct) if not '_id' in struct else len(struct) - 1
        if len(doc) != struct_length:
            struct_doc_diff = list(set(struct).difference(set(doc)))
            if struct_doc_diff:
                # Fields declared but missing from the document.
                for field in struct_doc_diff:
                    if (type(field) is not type) and (not self.use_schemaless):
                        self._raise_exception(StructureError, None,
                            "missed fields %s in %s" % (struct_doc_diff, type(doc).__name__))
            else:
                # Fields present in the document but not declared.
                struct_struct_diff = list(set(doc).difference(set(struct)))
                bad_fields = [s for s in struct_struct_diff if s not in STRUCTURE_KEYWORDS]
                if bad_fields and not self.use_schemaless:
                    self._raise_exception(StructureError, None,
                        "unknown fields %s in %s" % (bad_fields, type(doc).__name__))
        for key in struct:
            if type(key) is type:
                new_key = "$%s" % key.__name__
            else:
                new_key = key
            new_path = ".".join([path, new_key]).strip('.')
            if new_key.split('.')[-1].startswith("$"):
                # Type key: every document key must match it.
                for doc_key in doc:
                    if not isinstance(doc_key, key):
                        self._raise_exception(SchemaTypeError, path,
                            "key of %s must be an instance of %s not %s" % (
                                path, key.__name__, type(doc_key).__name__))
                    self._validate_doc(doc[doc_key], struct[key], new_path)
            else:
                if key in doc:
                    self._validate_doc(doc[key], struct[key], new_path)
    elif isinstance(struct, list):
        if not isinstance(doc, list) and not isinstance(doc, tuple):
            self._raise_exception(SchemaTypeError, path,
                "%s must be an instance of list not %s" % (path, type(doc).__name__))
        # An empty declared list means "untyped items".
        if not len(struct):
            struct = None
        else:
            struct = struct[0]
        for obj in doc:
            self._validate_doc(obj, struct, path)
    elif isinstance(struct, tuple):
        if not isinstance(doc, list) and not isinstance(doc, tuple):
            self._raise_exception(SchemaTypeError, path,
                "%s must be an instance of list not %s" % (
                    path, type(doc).__name__))
        # Tuples are fixed-size: length and each position are checked.
        if len(doc) != len(struct):
            self._raise_exception(SchemaTypeError, path, "%s must have %s items not %s" % (
                path, len(struct), len(doc)))
        for i in range(len(struct)):
            self._validate_doc(doc[i], struct[i], path)
def _process_validators(self, doc, _struct, _path=""):
    """Run every configured validator against the (dot-collapsed) document.

    NOTE(review): the control flow is deliberately odd — a failing validator
    raises a message containing a literal '%s', which the except clause then
    formats with the field name. If a validator itself raises an exception
    whose text has no '%s' placeholder, the `unicode(e) % key` formatting
    will fail — confirm before relying on custom validator exceptions.
    """
    doted_doc = DotCollapsedDict(doc)
    for key, validators in self.validators.iteritems():
        if key in doted_doc and doted_doc[key] is not None:
            # Allow a single validator or an iterable of them.
            if not hasattr(validators, "__iter__"):
                validators = [validators]
            for validator in validators:
                try:
                    if not validator(doted_doc[key]):
                        raise ValidationError("%s does not pass the validator " + validator.__name__)
                except Exception, e:
                    self._raise_exception(ValidationError, key,
                        unicode(e) % key)
def _process_custom_type(self, target, doc, struct, path="", root_path=""):
    """Convert every CustomType field of *doc* in place.

    target: 'bson' converts Python values to their MongoDB representation
    (type-checking against python_type first); anything else converts from
    MongoDB back to Python.
    """
    for key in struct:
        if type(key) is type:
            new_key = "$%s" % key.__name__
        else:
            new_key = key
        new_path = ".".join([path, new_key]).strip('.')
        #
        # if the value is a dict, we have a another structure to validate
        #
        #
        # It is not a dict nor a list but a simple key:value
        #
        if isinstance(struct[key], CustomType):
            if target == 'bson':
                if key in doc:
                    if struct[key].python_type is not None:
                        if not isinstance(doc[key], struct[key].python_type) and doc[key] is not None:
                            self._raise_exception(SchemaTypeError, new_path,
                                "%s must be an instance of %s not %s" % (
                                    new_path, struct[key].python_type.__name__,
                                    type(doc[key]).__name__))
                    doc[key] = struct[key].to_bson(doc[key])
            else:
                if key in doc:
                    doc[key] = struct[key].to_python(doc[key])
        elif isinstance(struct[key], dict):
            if doc:  # we don't need to process an empty doc
                if type(key) is type:
                    for doc_key in doc:  # process type's key such {unicode:int}...
                        self._process_custom_type(target, doc[doc_key], struct[key], new_path, root_path)
                else:
                    if key in doc:  # we don't care about missing fields
                        self._process_custom_type(target, doc[key], struct[key], new_path, root_path)
        #
        # If the struct is a list, we have to validate all values into it
        #
        elif type(struct[key]) is list:
            #
            # check if the list must not be null
            #
            if struct[key]:
                l_objs = []
                if isinstance(struct[key][0], CustomType):
                    # Convert every item of the list and replace it wholesale.
                    for obj in doc[key]:
                        if target == 'bson':
                            if struct[key][0].python_type is not None:
                                if not isinstance(obj, struct[key][0].python_type) and obj is not None:
                                    self._raise_exception(SchemaTypeError, new_path,
                                        "%s must be an instance of %s not %s" % (
                                            new_path, struct[key][0].python_type.__name__,
                                            type(obj).__name__))
                            obj = struct[key][0].to_bson(obj)
                        else:
                            obj = struct[key][0].to_python(obj)
                        l_objs.append(obj)
                    doc[key] = l_objs
                elif isinstance(struct[key][0], dict):
                    if doc.get(key):
                        for obj in doc[key]:
                            self._process_custom_type(target, obj, struct[key][0], new_path, root_path)
def _set_default_fields(self, doc, struct, path=""):
    """Fill None fields of *doc* in place from the `default_values` descriptor.

    Callable defaults are invoked, dict/list defaults are copied so
    instances never share the class-level default object.
    """
    # TODO check this out, this method must be restructured
    for key in struct:
        new_key = key
        new_path = ".".join([path, new_key]).strip('.')
        #
        # default_values :
        # if the value is None, check if a default value exist.
        # if exists, and it is a function then call it otherwise,
        # juste feed it
        #
        if type(key) is not type:
            if doc[key] is None and new_path in self.default_values:
                new_value = self.default_values[new_path]
                if callable(new_value):
                    new_value = new_value()
                elif isinstance(new_value, dict):
                    new_value = deepcopy(new_value)
                elif isinstance(new_value, list):
                    new_value = new_value[:]
                if isinstance(struct[key], CustomType):
                    if not isinstance(new_value, struct[key].python_type):
                        self._raise_exception(DefaultFieldTypeError, new_path,
                            "%s must be an instance of %s not %s" % (
                                new_path, struct[key].python_type.__name__,
                                type(new_value).__name__))
                doc[key] = new_value
        #
        # if the value is a dict, we have a another structure to validate
        #
        if isinstance(struct[key], dict) and new_path not in self.i18n:
            #
            # if the dict is still empty into the document we build
            # it with None values
            #
            if len(struct[key]) and not [i for i in struct[key].keys() if type(i) is type]:
                # Plain nested dict (no type keys): recurse field by field.
                self._set_default_fields(doc[key], struct[key], new_path)
            else:
                if new_path in self.default_values:
                    new_value = self.default_values[new_path]
                    if callable(new_value):
                        new_value = new_value()
                    elif isinstance(new_value, dict):
                        new_value = deepcopy(new_value)
                    elif isinstance(new_value, list):
                        new_value = new_value[:]
                    doc[key] = new_value
        elif isinstance(struct[key], list):
            if new_path in self.default_values:
                # List defaults are appended item by item, each item copied
                # or called as above.
                for new_value in self.default_values[new_path]:
                    if callable(new_value):
                        new_value = new_value()
                    elif isinstance(new_value, dict):
                        new_value = deepcopy(new_value)
                    elif isinstance(new_value, list):
                        new_value = new_value[:]
                    if isinstance(struct[key][0], CustomType):
                        if not isinstance(new_value, struct[key][0].python_type):
                            self._raise_exception(DefaultFieldTypeError, new_path,
                                "%s must be an instance of %s not %s" % (
                                    new_path, struct[key][0].python_type.__name__,
                                    type(new_value).__name__))
                    doc[key].append(new_value)
        else:  # what else
            if new_path in self.default_values:
                new_value = self.default_values[new_path]
                if callable(new_value):
                    new_value = new_value()
                elif isinstance(new_value, dict):
                    new_value = deepcopy(new_value)
                elif isinstance(new_value, list):
                    new_value = new_value[:]
                if new_path in self.i18n:
                    # Translated field: wrap the default in an i18n mapping.
                    doc[key] = i18n(
                        field_type=struct[key],
                        field_name=key
                    )
                    doc[key].update(new_value)
                else:
                    doc[key] = new_value
def _validate_required(self, doc, _struct, _path="", _root_path=""):
    """Raise (or record) RequireFieldError for each required field that is
    None, an empty list, or an empty dict in *doc*.

    Fields whose declared type is dict (or a CustomType stored as dict) are
    exempt from the None check, since their value lives in nested keys.
    """
    doted_struct = DotCollapsedDict(self.structure)
    doted_doc = DotCollapsedDict(doc, reference=doted_struct)
    for req in self.required_fields:
        value = doted_doc.get(req)
        declared = doted_struct.get(req)
        if value is None and declared is not dict:
            if not isinstance(declared, CustomType):
                self._raise_exception(RequireFieldError, req, "%s is required" % req)
            elif declared.mongo_type is not dict:
                self._raise_exception(RequireFieldError, req, "%s is required" % req)
        elif value == [] or value == {}:
            self._raise_exception(RequireFieldError, req, "%s is required" % req)
    def __generate_skeleton(self, doc, struct, path=""):
        """Recursively fill ``doc`` with empty/None placeholders for every
        field declared in ``struct`` that is not present yet.

        Keys that are types (e.g. ``{unicode: int}``) describe dynamic
        dictionaries, so no concrete key can be generated for them; they
        are only used to extend ``path`` (as ``$TypeName``) and are skipped
        otherwise.
        """
        for key in struct:
            if type(key) is type:
                new_key = "$%s" % key.__name__
            else:
                new_key = key
            new_path = ".".join([path, new_key]).strip('.')
            #
            # Automatically generate the skeleton with NoneType
            #
            if type(key) is not type and key not in doc:
                if isinstance(struct[key], dict):
                    # dict sub-structure: wrap in a dot-notation dict when
                    # enabled (i18n-aware variant for i18n namespaces)
                    if type(struct[key]) is dict and self.use_dot_notation:
                        if new_path in self._i18n_namespace:
                            doc[key] = i18nDotedDict(doc.get(key, {}), self)
                        else:
                            doc[key] = DotedDict(doc.get(key, {}), warning=self.dot_notation_warning)
                    else:
                        # dict subclass declared in the structure: build a
                        # fresh instance of that subclass
                        if callable(struct[key]):
                            doc[key] = struct[key]()
                        else:
                            doc[key] = type(struct[key])()
                elif struct[key] is dict:
                    doc[key] = {}
                elif isinstance(struct[key], list):
                    # preserve the declared list subclass, if any
                    doc[key] = type(struct[key])()
                elif isinstance(struct[key], CustomType):
                    if struct[key].init_type is not None:
                        doc[key] = struct[key].init_type()
                    else:
                        doc[key] = None
                elif struct[key] is list:
                    doc[key] = []
                elif isinstance(struct[key], tuple):
                    # tuples are fixed-length: one None slot per element
                    doc[key] = [None for _ in range(len(struct[key]))]
                else:
                    doc[key] = None
            #
            # if the value is a dict, we have another structure to validate
            #
            if isinstance(struct[key], dict) and type(key) is not type:
                self.__generate_skeleton(doc[key], struct[key], new_path)
    def __generate_doted_dict(self, doc, struct, path=""):
        """Recursively wrap every plain-dict field of ``doc`` in a
        ``DotedDict`` (or ``i18nDotedDict`` inside i18n namespaces) so its
        values can be read and written with dot notation.

        Unlike ``__generate_skeleton`` this rewraps fields even when they
        already exist in ``doc``.
        """
        for key in struct:
            #
            # Automatically generate the skeleton with NoneType
            #
            if type(key) is type:
                new_key = "$%s" % key.__name__
            else:
                new_key = key
            new_path = ".".join([path, new_key]).strip('.')
            if type(key) is not type:  # and key not in doc:
                if isinstance(struct[key], dict):
                    # only plain dicts are wrapped; dict subclasses keep
                    # their own behavior
                    if type(struct[key]) is dict:
                        if new_path in self._i18n_namespace:
                            doc[key] = i18nDotedDict(doc.get(key, {}), self)
                        else:
                            doc[key] = DotedDict(doc.get(key, {}), warning=self.dot_notation_warning)
            #
            # if the value is a dict, we have another structure to validate
            #
            if isinstance(struct[key], dict) and type(key) is not type:
                self.__generate_doted_dict(doc[key], struct[key], new_path)
    def _make_i18n(self):
        """Replace each field listed in ``self.i18n`` with an ``i18n``
        CustomType wrapping the originally declared type.

        Raises a ValidationError (via ``_raise_exception``) when an i18n
        field is not declared in the structure.  Already-wrapped fields are
        left untouched, so calling this twice is harmless.
        """
        doted_dict = DotCollapsedDict(self.structure)
        for field in self.i18n:
            if field not in doted_dict:
                self._raise_exception(ValidationError, field,
                                      "%s not found in structure" % field)
            if not isinstance(doted_dict[field], i18n):
                doted_dict[field] = i18n(
                    field_type=doted_dict[field],
                    field_name=field
                )
        # write the (possibly re-wrapped) fields back into the structure
        self.structure.update(DotExpandedDict(doted_dict))
    def set_lang(self, lang):
        """Set the language used when accessing i18n fields."""
        self._current_lang = lang
    def get_lang(self):
        """Return the language currently used for i18n fields."""
        return self._current_lang
class i18n(dict, CustomType):
    """ CustomType to deal with i18n

    In python the value is a dict mapping language code -> translated
    value; in mongo it is stored as a list of
    ``{'lang': <code>, 'value': <value>}`` documents.
    """
    mongo_type = list

    def __init__(self, field_type=None, field_name=None):
        super(i18n, self).__init__()
        self.python_type = self.__class__
        self._field_type = field_type
        self._field_name = field_name

    def __call__(self):
        return i18n(self._field_type, self._field_name)

    def to_bson(self, value):
        """Validate each translation against the declared field type and
        convert the mapping to mongo's list-of-documents form.

        Raises SchemaTypeError when a translated value (or, for list
        fields, any list item) does not match the declared type.
        """
        if value is not None:
            for l, v in value.iteritems():
                if isinstance(v, list) and isinstance(self._field_type, list):
                    # list field: check every item against the element type
                    for i in v:
                        if not isinstance(i, self._field_type[0]):
                            raise SchemaTypeError("%s (%s) must be an instance of %s not %s" % (
                              self._field_name, l, self._field_type[0], type(i).__name__))
                else:
                    if not isinstance(v, self._field_type):
                        raise SchemaTypeError("%s (%s) must be an instance of %s not %s" % (
                          self._field_name, l, self._field_type, type(v).__name__))
            return [{'lang': l, 'value': v} for l, v in value.iteritems()]

    def to_python(self, value):
        """Convert the stored list back into an i18n dict.

        Bug fix: the reconstructed instance previously received only
        ``self._field_type``, losing the field name (inconsistent with
        ``__call__``); a later ``to_bson`` on the round-tripped value then
        reported ``None`` as the field in its error messages.  Forward
        both attributes.
        """
        if value is not None:
            i18n_dict = self.__class__(self._field_type, self._field_name)
            for i in value:
                i18n_dict[i['lang']] = i['value']
            return i18n_dict
class Set(CustomType):
    """ SET custom type to handle python set() type

    Stored in mongo as a list; exposed in python as a set.  An optional
    ``structure_type`` constrains the type of every element.
    """
    init_type = set
    mongo_type = list
    python_type = set

    def __init__(self, structure_type=None):
        super(Set, self).__init__()
        self._structure_type = structure_type

    def to_bson(self, value):
        """Convert the set to a list for storage (None passes through)."""
        if value is None:
            return None
        return list(value)

    def to_python(self, value):
        """Rebuild a set from the stored list (None passes through)."""
        if value is None:
            return None
        return set(value)

    def validate(self, value, path):
        """Check every element against ``structure_type``, when declared."""
        if value is None or self._structure_type is None:
            return
        for item in value:
            if isinstance(item, self._structure_type):
                continue
            raise ValueError('%s must be an instance of %s not %s' %
                             (path, self._structure_type.__name__, type(item).__name__))
| bsd-3-clause |
martynovp/edx-platform | lms/djangoapps/ccx/tests/test_utils.py | 21 | 21985 | """
test utils
"""
from nose.plugins.attrib import attr
from ccx.models import ( # pylint: disable=import-error
CcxMembership,
CcxFutureMembership,
)
from ccx.tests.factories import ( # pylint: disable=import-error
CcxFactory,
CcxMembershipFactory,
CcxFutureMembershipFactory,
)
from student.roles import CourseCcxCoachRole # pylint: disable=import-error
from student.tests.factories import ( # pylint: disable=import-error
AdminFactory,
UserFactory,
)
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
TEST_DATA_SPLIT_MODULESTORE)
from xmodule.modulestore.tests.factories import CourseFactory
from ccx_keys.locator import CCXLocator
@attr('shard_1')
class TestEmailEnrollmentState(ModuleStoreTestCase):
    """unit tests for the EmailEnrollmentState class

    setUp builds a course, a coach and a CCX; each test creates a student
    user only when its scenario needs one.
    """
    def setUp(self):
        """
        Set up a course, a coach holding the CCX coach role, and a CCX.
        """
        super(TestEmailEnrollmentState, self).setUp()
        # remove user provided by the parent test case so we can make our own
        # when needed.
        self.user = None
        course = CourseFactory.create()
        coach = AdminFactory.create()
        role = CourseCcxCoachRole(course.id)
        role.add_users(coach)
        self.ccx = CcxFactory(course_id=course.id, coach=coach)
    def create_user(self):
        """provide a legitimate django user for testing
        """
        if getattr(self, 'user', None) is None:
            self.user = UserFactory()
    def register_user_in_ccx(self):
        """create registration of self.user in self.ccx

        NOTE(review): whether the membership is active depends on the
        CcxMembershipFactory defaults -- confirm before relying on it.
        """
        self.create_user()
        CcxMembershipFactory(ccx=self.ccx, student=self.user)
    def create_one(self, email=None):
        """Create a single EmailEnrollmentState object and return it
        """
        # imported lazily so an import failure surfaces per-test rather
        # than breaking the whole module import
        from ccx.utils import EmailEnrollmentState  # pylint: disable=import-error
        if email is None:
            email = self.user.email
        return EmailEnrollmentState(self.ccx, email)
    def test_enrollment_state_for_non_user(self):
        """verify behavior for non-user email address
        """
        ee_state = self.create_one(email='nobody@nowhere.com')
        for attribute in ['user', 'member', 'full_name', 'in_ccx']:
            value = getattr(ee_state, attribute, 'missing attribute')
            self.assertFalse(value, "{}: {}".format(value, attribute))
    def test_enrollment_state_for_non_member_user(self):
        """verify behavior for email address of user who is not a ccx member
        """
        self.create_user()
        ee_state = self.create_one()
        self.assertTrue(ee_state.user)
        self.assertFalse(ee_state.in_ccx)
        self.assertEqual(ee_state.member, self.user)
        self.assertEqual(ee_state.full_name, self.user.profile.name)
    def test_enrollment_state_for_member_user(self):
        """verify behavior for email address of user who is a ccx member
        """
        self.create_user()
        self.register_user_in_ccx()
        ee_state = self.create_one()
        for attribute in ['user', 'in_ccx']:
            self.assertTrue(
                getattr(ee_state, attribute, False),
                "attribute {} is missing or False".format(attribute)
            )
        self.assertEqual(ee_state.member, self.user)
        self.assertEqual(ee_state.full_name, self.user.profile.name)
    def test_enrollment_state_to_dict(self):
        """verify dict representation of EmailEnrollmentState
        """
        self.create_user()
        self.register_user_in_ccx()
        ee_state = self.create_one()
        ee_dict = ee_state.to_dict()
        expected = {
            'user': True,
            'member': self.user,
            'in_ccx': True,
        }
        for expected_key, expected_value in expected.iteritems():
            self.assertTrue(expected_key in ee_dict)
            self.assertEqual(expected_value, ee_dict[expected_key])
    def test_enrollment_state_repr(self):
        """verify repr of EmailEnrollmentState shows user, member and
        in_ccx state
        """
        self.create_user()
        self.register_user_in_ccx()
        ee_state = self.create_one()
        representation = repr(ee_state)
        self.assertTrue('user=True' in representation)
        self.assertTrue('in_ccx=True' in representation)
        member = 'member={}'.format(self.user)
        self.assertTrue(member in representation)
@attr('shard_1')
# TODO: deal with changes in behavior for auto_enroll
class TestGetEmailParams(ModuleStoreTestCase):
    """tests for ccx.utils.get_email_params
    """
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
    def setUp(self):
        """
        Set up a course, coach and CCX, plus the key names expected in the
        email-params dict (and the subsets of URL and course-URL keys).
        """
        super(TestGetEmailParams, self).setUp()
        course = CourseFactory.create()
        coach = AdminFactory.create()
        role = CourseCcxCoachRole(course.id)
        role.add_users(coach)
        self.ccx = CcxFactory(course_id=course.id, coach=coach)
        self.all_keys = [
            'site_name', 'course', 'course_url', 'registration_url',
            'course_about_url', 'auto_enroll'
        ]
        self.url_keys = [k for k in self.all_keys if 'url' in k]
        self.course_keys = [k for k in self.url_keys if 'course' in k]
    def call_fut(self, auto_enroll=False, secure=False):
        """
        call function under test
        """
        from ccx.utils import get_email_params  # pylint: disable=import-error
        return get_email_params(self.ccx, auto_enroll, secure)
    def test_params_have_expected_keys(self):
        """the returned dict contains no keys beyond the expected set"""
        params = self.call_fut()
        self.assertFalse(set(params.keys()) - set(self.all_keys))
    def test_ccx_id_in_params(self):
        """course URLs embed the CCX locator course id"""
        expected_course_id = unicode(CCXLocator.from_course_locator(self.ccx.course_id, self.ccx.id))
        params = self.call_fut()
        self.assertEqual(params['course'], self.ccx)
        for url_key in self.url_keys:
            self.assertTrue('http://' in params[url_key])
        for url_key in self.course_keys:
            self.assertTrue(expected_course_id in params[url_key])
    def test_security_respected(self):
        """secure=True yields https:// URLs, secure=False yields http://"""
        secure = self.call_fut(secure=True)
        for url_key in self.url_keys:
            self.assertTrue('https://' in secure[url_key])
        insecure = self.call_fut(secure=False)
        for url_key in self.url_keys:
            self.assertTrue('http://' in insecure[url_key])
    def test_auto_enroll_passed_correctly(self):
        """the auto_enroll flag is passed through unchanged"""
        not_auto = self.call_fut(auto_enroll=False)
        self.assertFalse(not_auto['auto_enroll'])
        auto = self.call_fut(auto_enroll=True)
        self.assertTrue(auto['auto_enroll'])
@attr('shard_1')
# TODO: deal with changes in behavior for auto_enroll
class TestEnrollEmail(ModuleStoreTestCase):
    """tests for the enroll_email function from ccx.utils

    Covers the cross product of {non-user email, non-member user, member
    user} x {send enrollment email, send no email}.
    """
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
    def setUp(self):
        """Set up a course, coach and CCX, and capture the mail outbox."""
        super(TestEnrollEmail, self).setUp()
        # unbind the user created by the parent, so we can create our own when
        # needed.
        self.user = None
        course = CourseFactory.create()
        coach = AdminFactory.create()
        role = CourseCcxCoachRole(course.id)
        role.add_users(coach)
        self.ccx = CcxFactory(course_id=course.id, coach=coach)
        self.outbox = self.get_outbox()
    def create_user(self):
        """provide a legitimate django user for testing
        """
        if getattr(self, 'user', None) is None:
            self.user = UserFactory()
    def register_user_in_ccx(self):
        """create registration of self.user in self.ccx

        NOTE(review): whether the membership is active depends on the
        CcxMembershipFactory defaults -- confirm before relying on it.
        """
        self.create_user()
        CcxMembershipFactory(ccx=self.ccx, student=self.user)
    def get_outbox(self):
        """Return the django mail outbox"""
        from django.core import mail
        return mail.outbox
    def check_membership(self, email=None, user=None, future=False):
        """Verify that an appropriate CCX Membership exists

        NOTE(review): when future=True but no email is passed, ``membership``
        is never bound and this raises NameError -- callers always pass
        email together with future=True.
        """
        if not email and not user:
            self.fail(
                "must provide user or email address to check CCX Membership"
            )
        if future and email:
            membership = CcxFutureMembership.objects.filter(
                ccx=self.ccx, email=email
            )
        elif not future:
            if not user:
                user = self.user
            membership = CcxMembership.objects.filter(
                ccx=self.ccx, student=user
            )
        self.assertTrue(membership.exists())
    def check_enrollment_state(self, state, in_ccx, member, user):
        """Verify an enrollment state object against provided arguments
        state.in_ccx will always be a boolean
        state.user will always be a boolean
        state.member will be a Django user object or None
        """
        self.assertEqual(in_ccx, state.in_ccx)
        self.assertEqual(member, state.member)
        self.assertEqual(user, state.user)
    def call_fut(
            self,
            student_email=None,
            auto_enroll=False,
            email_students=False,
            email_params=None
    ):
        """Call function under test"""
        from ccx.utils import enroll_email  # pylint: disable=import-error
        if student_email is None:
            student_email = self.user.email
        before, after = enroll_email(
            self.ccx, student_email, auto_enroll, email_students, email_params
        )
        return before, after
    def test_enroll_non_user_sending_email(self):
        """enroll a non-user email and send an enrollment email to them
        """
        # ensure no emails are in the outbox now
        self.assertEqual(self.outbox, [])
        test_email = "nobody@nowhere.com"
        before, after = self.call_fut(
            student_email=test_email, email_students=True
        )
        # there should be a future membership set for this email address now
        self.check_membership(email=test_email, future=True)
        for state in [before, after]:
            self.check_enrollment_state(state, False, None, False)
        # mail was sent and to the right person
        self.assertEqual(len(self.outbox), 1)
        msg = self.outbox[0]
        self.assertTrue(test_email in msg.recipients())
    def test_enroll_non_member_sending_email(self):
        """register a non-member and send an enrollment email to them
        """
        self.create_user()
        # ensure no emails are in the outbox now
        self.assertEqual(self.outbox, [])
        before, after = self.call_fut(email_students=True)
        # there should be a membership set for this email address now
        self.check_membership(email=self.user.email)
        self.check_enrollment_state(before, False, self.user, True)
        self.check_enrollment_state(after, True, self.user, True)
        # mail was sent and to the right person
        self.assertEqual(len(self.outbox), 1)
        msg = self.outbox[0]
        self.assertTrue(self.user.email in msg.recipients())
    def test_enroll_member_sending_email(self):
        """register a member and send an enrollment email to them
        """
        self.register_user_in_ccx()
        # ensure no emails are in the outbox now
        self.assertEqual(self.outbox, [])
        before, after = self.call_fut(email_students=True)
        # there should be a membership set for this email address now
        self.check_membership(email=self.user.email)
        for state in [before, after]:
            self.check_enrollment_state(state, True, self.user, True)
        # mail was sent and to the right person
        self.assertEqual(len(self.outbox), 1)
        msg = self.outbox[0]
        self.assertTrue(self.user.email in msg.recipients())
    def test_enroll_non_user_no_email(self):
        """register a non-user via email address but send no email
        """
        # ensure no emails are in the outbox now
        self.assertEqual(self.outbox, [])
        test_email = "nobody@nowhere.com"
        before, after = self.call_fut(
            student_email=test_email, email_students=False
        )
        # there should be a future membership set for this email address now
        self.check_membership(email=test_email, future=True)
        for state in [before, after]:
            self.check_enrollment_state(state, False, None, False)
        # ensure there are still no emails in the outbox now
        self.assertEqual(self.outbox, [])
    def test_enroll_non_member_no_email(self):
        """register a non-member but send no email"""
        self.create_user()
        # ensure no emails are in the outbox now
        self.assertEqual(self.outbox, [])
        before, after = self.call_fut(email_students=False)
        # there should be a membership set for this email address now
        self.check_membership(email=self.user.email)
        self.check_enrollment_state(before, False, self.user, True)
        self.check_enrollment_state(after, True, self.user, True)
        # ensure there are still no emails in the outbox now
        self.assertEqual(self.outbox, [])
    def test_enroll_member_no_email(self):
        """enroll a member but send no email
        """
        self.register_user_in_ccx()
        # ensure no emails are in the outbox now
        self.assertEqual(self.outbox, [])
        before, after = self.call_fut(email_students=False)
        # there should be a membership set for this email address now
        self.check_membership(email=self.user.email)
        for state in [before, after]:
            self.check_enrollment_state(state, True, self.user, True)
        # ensure there are still no emails in the outbox now
        self.assertEqual(self.outbox, [])
@attr('shard_1')
# TODO: deal with changes in behavior for auto_enroll
class TestUnenrollEmail(ModuleStoreTestCase):
    """Tests for the unenroll_email function from ccx.utils

    Covers {future member, current member} x {send email, no email}.
    """
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
    def setUp(self):
        """Set up course/coach/CCX, the mail outbox and a probe email."""
        super(TestUnenrollEmail, self).setUp()
        # unbind the user created by the parent, so we can create our own when
        # needed.
        self.user = None
        course = CourseFactory.create()
        coach = AdminFactory.create()
        role = CourseCcxCoachRole(course.id)
        role.add_users(coach)
        self.ccx = CcxFactory(course_id=course.id, coach=coach)
        self.outbox = self.get_outbox()
        self.email = "nobody@nowhere.com"
    def get_outbox(self):
        """Return the django mail outbox"""
        from django.core import mail
        return mail.outbox
    def create_user(self):
        """provide a legitimate django user for testing
        """
        if getattr(self, 'user', None) is None:
            self.user = UserFactory()
    def make_ccx_membership(self):
        """create registration of self.user in self.ccx

        NOTE(review): whether the membership is active depends on the
        CcxMembershipFactory defaults -- confirm before relying on it.
        """
        self.create_user()
        CcxMembershipFactory.create(ccx=self.ccx, student=self.user)
    def make_ccx_future_membership(self):
        """create future registration for email in self.ccx"""
        CcxFutureMembershipFactory.create(
            ccx=self.ccx, email=self.email
        )
    def check_enrollment_state(self, state, in_ccx, member, user):
        """Verify an enrollment state object against provided arguments
        state.in_ccx will always be a boolean
        state.user will always be a boolean
        state.member will be a Django user object or None
        """
        self.assertEqual(in_ccx, state.in_ccx)
        self.assertEqual(member, state.member)
        self.assertEqual(user, state.user)
    def check_membership(self, future=False):
        """
        Return True when the (future) membership for this test's
        email/user still exists.
        """
        if future:
            membership = CcxFutureMembership.objects.filter(
                ccx=self.ccx, email=self.email
            )
        else:
            membership = CcxMembership.objects.filter(
                ccx=self.ccx, student=self.user
            )
        return membership.exists()
    def call_fut(self, email_students=False):
        """call function under test"""
        from ccx.utils import unenroll_email  # pylint: disable=import-error
        # use the created user's email when one exists, else the probe email
        email = getattr(self, 'user', None) and self.user.email or self.email
        return unenroll_email(self.ccx, email, email_students=email_students)
    def test_unenroll_future_member_with_email(self):
        """unenroll a future member and send an email
        """
        self.make_ccx_future_membership()
        # assert that a membership exists and that no emails have been sent
        self.assertTrue(self.check_membership(future=True))
        self.assertEqual(self.outbox, [])
        # unenroll the student
        before, after = self.call_fut(email_students=True)
        # assert that membership is now gone
        self.assertFalse(self.check_membership(future=True))
        # validate the before and after enrollment states
        for state in [before, after]:
            self.check_enrollment_state(state, False, None, False)
        # check that mail was sent and to the right person
        self.assertEqual(len(self.outbox), 1)
        msg = self.outbox[0]
        self.assertTrue(self.email in msg.recipients())
    def test_unenroll_member_with_email(self):
        """unenroll a current member and send an email"""
        self.make_ccx_membership()
        # assert that a membership exists and that no emails have been sent
        self.assertTrue(self.check_membership())
        self.assertEqual(self.outbox, [])
        # unenroll the student
        before, after = self.call_fut(email_students=True)
        # assert that membership is now gone
        self.assertFalse(self.check_membership())
        # validate the before and after enrollment state
        self.check_enrollment_state(after, False, self.user, True)
        self.check_enrollment_state(before, True, self.user, True)
        # check that mail was sent and to the right person
        self.assertEqual(len(self.outbox), 1)
        msg = self.outbox[0]
        self.assertTrue(self.user.email in msg.recipients())
    def test_unenroll_future_member_no_email(self):
        """unenroll a future member but send no email
        """
        self.make_ccx_future_membership()
        # assert that a membership exists and that no emails have been sent
        self.assertTrue(self.check_membership(future=True))
        self.assertEqual(self.outbox, [])
        # unenroll the student
        before, after = self.call_fut()
        # assert that membership is now gone
        self.assertFalse(self.check_membership(future=True))
        # validate the before and after enrollment states
        for state in [before, after]:
            self.check_enrollment_state(state, False, None, False)
        # no email was sent to the student
        self.assertEqual(self.outbox, [])
    def test_unenroll_member_no_email(self):
        """unenroll a current member but send no email
        """
        self.make_ccx_membership()
        # assert that a membership exists and that no emails have been sent
        self.assertTrue(self.check_membership())
        self.assertEqual(self.outbox, [])
        # unenroll the student
        before, after = self.call_fut()
        # assert that membership is now gone
        self.assertFalse(self.check_membership())
        # validate the before and after enrollment state
        self.check_enrollment_state(after, False, self.user, True)
        self.check_enrollment_state(before, True, self.user, True)
        # no email was sent to the student
        self.assertEqual(self.outbox, [])
@attr('shard_1')
class TestGetMembershipTriplets(ModuleStoreTestCase):
    """Verify that get_ccx_membership_triplets functions properly"""
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
    def setUp(self):
        """Set up a course, coach, ccx and user

        NOTE(review): unlike the other test classes here, this one relies on
        the ``self.user`` created by the parent test case -- confirm.
        """
        super(TestGetMembershipTriplets, self).setUp()
        self.course = CourseFactory.create()
        coach = AdminFactory.create()
        role = CourseCcxCoachRole(self.course.id)
        role.add_users(coach)
        self.ccx = CcxFactory(course_id=self.course.id, coach=coach)
    def make_ccx_membership(self, active=True):
        """create registration of self.user in self.ccx

        the membership is active unless ``active=False`` is passed
        """
        CcxMembershipFactory.create(ccx=self.ccx, student=self.user, active=active)
    def call_fut(self, org_filter=None, org_filter_out=()):
        """call the function under test in this test case"""
        from ccx.utils import get_ccx_membership_triplets
        return list(
            get_ccx_membership_triplets(self.user, org_filter, org_filter_out)
        )
    def test_no_membership(self):
        """verify that no triplets are returned if there are no memberships
        """
        triplets = self.call_fut()
        self.assertEqual(len(triplets), 0)
    def test_has_membership(self):
        """verify that a triplet is returned when a membership exists
        """
        self.make_ccx_membership()
        triplets = self.call_fut()
        self.assertEqual(len(triplets), 1)
        ccx, membership, course = triplets[0]
        self.assertEqual(ccx.id, self.ccx.id)
        self.assertEqual(unicode(course.id), unicode(self.course.id))
        self.assertEqual(membership.student, self.user)
    def test_has_membership_org_filtered(self):
        """verify that microsite org filter prevents seeing microsite ccx"""
        self.make_ccx_membership()
        # a deliberately non-matching org name
        bad_org = self.course.location.org + 'foo'
        triplets = self.call_fut(org_filter=bad_org)
        self.assertEqual(len(triplets), 0)
    def test_has_membership_org_filtered_out(self):
        """verify that microsite ccxs not seen in non-microsite view"""
        self.make_ccx_membership()
        filter_list = [self.course.location.org]
        triplets = self.call_fut(org_filter_out=filter_list)
        self.assertEqual(len(triplets), 0)
| agpl-3.0 |
public-ink/public-ink | server/appengine/lib/graphql/validation/rules/no_unused_fragments.py | 3 | 1489 | from ...error import GraphQLError
from .base import ValidationRule
class NoUnusedFragments(ValidationRule):
    """Validation rule: every fragment defined in a document must be
    spread, directly or transitively, by at least one operation.

    Operations and fragment definitions are collected while visiting and
    the check runs once when leaving the document.
    """
    # Only these two attributes are ever assigned; the previously declared
    # 'fragment_adjacencies' and 'spread_names' slots were dead names.
    __slots__ = 'fragment_definitions', 'operation_definitions'

    def __init__(self, context):
        super(NoUnusedFragments, self).__init__(context)
        self.operation_definitions = []
        self.fragment_definitions = []

    def enter_OperationDefinition(self, node, key, parent, path, ancestors):
        # Record the operation; returning False skips its children.
        self.operation_definitions.append(node)
        return False

    def enter_FragmentDefinition(self, node, key, parent, path, ancestors):
        # Record the fragment definition; returning False skips its children.
        self.fragment_definitions.append(node)
        return False

    def leave_Document(self, node, key, parent, path, ancestors):
        # Collect every fragment name reachable from any operation, then
        # report each defined fragment that is not in that set.
        fragment_names_used = set()
        for operation in self.operation_definitions:
            fragments = self.context.get_recursively_referenced_fragments(operation)
            for fragment in fragments:
                fragment_names_used.add(fragment.name.value)
        for fragment_definition in self.fragment_definitions:
            if fragment_definition.name.value not in fragment_names_used:
                self.context.report_error(GraphQLError(
                    self.unused_fragment_message(fragment_definition.name.value),
                    [fragment_definition]
                ))

    @staticmethod
    def unused_fragment_message(fragment_name):
        return 'Fragment "{}" is never used.'.format(fragment_name)
| gpl-3.0 |
virneo/nupic | nupic/data/inference_shifter.py | 39 | 3485 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""TimeShifter class for shifting ModelResults."""
import collections
import copy
from nupic.frameworks.opf.opfutils import InferenceElement, ModelResult
class InferenceShifter(object):
  """Shifts time for ModelResult objects."""
  def __init__(self):
    # FIFO of recent inference dicts; created lazily on the first shift()
    # call because its length depends on the model's maximum temporal delay.
    self._inferenceBuffer = None
  def shift(self, modelResult):
    """Shift the model result and return the new instance.
    Queues up the T(i+1) prediction value and emits a T(i)
    input/prediction pair, if possible. E.g., if the previous T(i-1)
    iteration was learn-only, then we would not have a T(i) prediction in our
    FIFO and would not be able to emit a meaningful input/prediction pair.
    Args:
      modelResult: A ModelResult instance to shift.
    Returns:
      A ModelResult instance.
    """
    inferencesToWrite = {}
    if self._inferenceBuffer is None:
      maxDelay = InferenceElement.getMaxDelay(modelResult.inferences)
      self._inferenceBuffer = collections.deque(maxlen=maxDelay + 1)
    # deepcopy so later in-place mutation of the model's inference dicts
    # cannot corrupt the buffered history
    self._inferenceBuffer.appendleft(copy.deepcopy(modelResult.inferences))
    for inferenceElement, inference in modelResult.inferences.iteritems():
      if isinstance(inference, dict):
        # per-key delays: each entry is shifted by its own temporal delay
        inferencesToWrite[inferenceElement] = {}
        for key, _ in inference.iteritems():
          delay = InferenceElement.getTemporalDelay(inferenceElement, key)
          if len(self._inferenceBuffer) > delay:
            prevInference = self._inferenceBuffer[delay][inferenceElement][key]
            inferencesToWrite[inferenceElement][key] = prevInference
          else:
            # not enough history accumulated yet for this delay
            inferencesToWrite[inferenceElement][key] = None
      else:
        delay = InferenceElement.getTemporalDelay(inferenceElement)
        if len(self._inferenceBuffer) > delay:
          inferencesToWrite[inferenceElement] = (
              self._inferenceBuffer[delay][inferenceElement])
        else:
          # preserve the shape of list/tuple inferences while emitting None
          if type(inference) in (list, tuple):
            inferencesToWrite[inferenceElement] = [None] * len(inference)
          else:
            inferencesToWrite[inferenceElement] = None
    shiftedResult = ModelResult(rawInput=modelResult.rawInput,
                                sensorInput=modelResult.sensorInput,
                                inferences=inferencesToWrite,
                                metrics=modelResult.metrics,
                                predictedFieldIdx=modelResult.predictedFieldIdx,
                                predictedFieldName=modelResult.predictedFieldName)
    return shiftedResult
| agpl-3.0 |
vinilios/synnefo | snf-astakos-app/astakos/im/register.py | 8 | 6325 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from synnefo.util import units
from astakos.im.models import Resource, Service, Endpoint, EndpointData
from astakos.im import quotas
import logging
logger = logging.getLogger(__name__)
main_fields = ['desc', 'unit']
config_fields = ['ui_visible', 'api_visible']
class RegisterException(Exception):
    """Raised when registration input is malformed or conflicts with an
    already registered service/resource."""
    pass
def different_component(service, resource):
    """Return True when *resource* was registered for a service that belongs
    to a different component than *service*.

    An unknown service_origin counts as "not different" (False).
    """
    try:
        owner = Service.objects.get(name=resource.service_origin)
    except Service.DoesNotExist:
        return False
    return owner.component != service.component
def add_resource(resource_dict):
    """Create or update a Resource from *resource_dict*.

    The dict must provide 'name', 'service_type' and 'service_origin';
    optional keys are the entries of ``main_fields`` and ``config_fields``.
    Returns ``(resource, exists)`` where *exists* tells whether the
    resource was already registered.  Raises RegisterException on
    malformed input, unknown service, cross-component name clashes, or an
    inconsistent visibility combination.
    """
    name = resource_dict.get('name')
    service_type = resource_dict.get('service_type')
    service_origin = resource_dict.get('service_origin')
    if not name or not service_type or not service_origin:
        raise RegisterException("Malformed resource dict.")
    try:
        service = Service.objects.get(name=service_origin)
    except Service.DoesNotExist:
        m = "There is no service %s." % service_origin
        raise RegisterException(m)
    try:
        # lock the row so concurrent registrations cannot interleave
        r = Resource.objects.select_for_update().get(name=name)
        exists = True
        # a resource may be re-registered by the same component; only a
        # different component clashing on the name is an error
        if r.service_type != service_type and \
                different_component(service, r):
            m = ("There already exists a resource named %s with service "
                 "type %s." % (name, r.service_type))
            raise RegisterException(m)
        if r.service_origin != service_origin and \
                different_component(service, r):
            m = ("There already exists a resource named %s registered for "
                 "service %s." % (name, r.service_origin))
            raise RegisterException(m)
        r.service_origin = service_origin
        r.service_type = service_type
    except Resource.DoesNotExist:
        r = Resource(name=name,
                     uplimit=units.PRACTICALLY_INFINITE,
                     service_type=service_type,
                     service_origin=service_origin)
        exists = False
    for field in config_fields:
        value = resource_dict.get(field)
        if value is not None:
            setattr(r, field, value)
    # invisible resources get an effectively unlimited project default
    r.project_default = 0 if r.api_visible else units.PRACTICALLY_INFINITE
    for field in main_fields:
        value = resource_dict.get(field)
        if value is not None:
            setattr(r, field, value)
    if r.ui_visible and not r.api_visible:
        m = "Flag 'ui_visible' should entail 'api_visible'."
        raise RegisterException(m)
    r.save()
    if not exists:
        # make the quota holder aware of the brand-new resource
        quotas.qh_sync_new_resource(r)
    if exists:
        logger.info("Updated resource %s." % (name))
    else:
        logger.info("Added resource %s." % (name))
    return r, exists
def update_base_default(resource, base_default):
    """Set *resource*'s base default (uplimit) to *base_default*, saving and
    logging only when the value actually changes."""
    if resource.uplimit == base_default:
        logger.info("Resource %s has base default %s; no need to update."
                    % (resource.name, base_default))
        return
    resource.uplimit = base_default
    resource.save()
    logger.info("Updated resource %s with base default %s."
                % (resource.name, base_default))
def update_project_default(resource, project_default):
    """Set *resource*'s project default to *project_default*, saving and
    logging only when the value actually changes."""
    if resource.project_default == project_default:
        logger.info("Resource %s has project default %s; no need to update."
                    % (resource.name, project_default))
        return
    resource.project_default = project_default
    resource.save()
    logger.info("Updated resource %s with project default %s."
                % (resource.name, project_default))
def resources_to_dict(resources):
    """Map each resource's name to its info dict."""
    return dict((r.name, r.get_info()) for r in resources)
def get_resources(resources=None, services=None):
    """Return a Resource queryset, optionally narrowed to the given
    resource names and/or services."""
    if resources is None:
        queryset = Resource.objects.all()
    else:
        queryset = Resource.objects.filter(name__in=resources)
    if services is None:
        return queryset
    return queryset.filter(service__in=services)
def get_api_visible_resources(resources=None, services=None):
    """Like get_resources(), restricted to API-visible resources."""
    return get_resources(resources, services).filter(api_visible=True)
def add_endpoint(component, service, endpoint_dict, out=None):
    """Create an Endpoint for *service* and one EndpointData row per
    key/value of *endpoint_dict*.

    A publicURL that does not start with the component's base URL is only
    warned about (to *out* when given, else to the module logger) -- it is
    still stored.
    """
    endpoint = Endpoint.objects.create(service=service)
    for key, value in endpoint_dict.iteritems():
        base_url = component.base_url
        if key == "publicURL" and (base_url is None or
                                   not value.startswith(base_url)):
            warn = out.write if out is not None else logger.warning
            warn("Warning: Endpoint URL '%s' does not start with "
                 "assumed component base URL '%s'.\n" % (value, base_url))
        EndpointData.objects.create(
            endpoint=endpoint, key=key, value=value)
def add_service(component, name, service_type, endpoints, out=None):
    """Register (or re-register) a service and its endpoints.

    Raises RegisterException when a service of the same name is owned by a
    different component.  On re-registration the old endpoints are dropped
    and replaced.  Returns True when the service already existed.
    """
    defaults = {'component': component,
                'type': service_type,
                }
    service, created = Service.objects.get_or_create(
        name=name, defaults=defaults)
    if not created:
        if service.component != component:
            m = ("There is already a service named %s registered by %s." %
                 (name, service.component.name))
            raise RegisterException(m)
        # replace the previous registration's endpoints and refresh fields
        service.endpoints.all().delete()
        for key, value in defaults.iteritems():
            setattr(service, key, value)
        service.save()
    for endpoint in endpoints:
        add_endpoint(component, service, endpoint, out=out)
    return not created
| gpl-3.0 |
crosswalk-project/crosswalk-test-suite | webapi/webapi-appuri-w3c-tests/inst.apk.py | 1996 | 3186 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
ADB_CMD = "adb"
def doCMD(cmd):
    """Run `cmd` in a shell, echoing its output live; return (exit_code, lines)."""
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    # stderr is folded into stdout so both streams are captured in order.
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        # Read a line BEFORE polling: output buffered just before process
        # exit is still consumed instead of being dropped.
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # Stop only when the stream is drained AND the process has exited;
        # a blank line from a still-running command keeps the loop going.
        if output_line == '' and cmd_return_code is not None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def uninstPKGs():
    """Uninstall every APK found under SCRIPT_DIR; False if any uninstall fails."""
    ok = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for name in files:
            if not name.endswith(".apk"):
                continue
            pkg = os.path.basename(os.path.splitext(name)[0])
            cmd = "%s -s %s uninstall org.xwalk.%s" % (
                ADB_CMD, PARAMETERS.device, pkg)
            (return_code, output) = doCMD(cmd)
            for line in output:
                if "Failure" in line:
                    ok = False
                    break
    return ok
def instPKGs():
    """Install every APK found under SCRIPT_DIR; False if any install fails."""
    ok = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for name in files:
            if not name.endswith(".apk"):
                continue
            cmd = "%s -s %s install %s" % (ADB_CMD,
                                           PARAMETERS.device, os.path.join(root, name))
            (return_code, output) = doCMD(cmd)
            for line in output:
                if "Failure" in line:
                    ok = False
                    break
    return ok
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.device:
(return_code, output) = doCMD("adb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
if not PARAMETERS.device:
print "No device found"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
# Script entry point: main() exits non-zero on any failure, so reaching
# the explicit exit(0) means everything succeeded.
if __name__ == "__main__":
    main()
    sys.exit(0)
| bsd-3-clause |
c0hen/django-venv | lib/python3.4/site-packages/django/db/backends/oracle/creation.py | 23 | 17817 | import sys
import time
from django.conf import settings
from django.db.backends.base.creation import BaseDatabaseCreation
from django.db.utils import DatabaseError
from django.utils.crypto import get_random_string
from django.utils.functional import cached_property
from django.utils.six.moves import input
TEST_DATABASE_PREFIX = 'test_'
class DatabaseCreation(BaseDatabaseCreation):
    """
    Oracle-specific test-database creation.
    Oracle has no lightweight CREATE DATABASE, so a "test database" is
    modelled as a pair of tablespaces plus a dedicated test user, all
    managed through the main (non-test) connection.
    """
    @cached_property
    def _maindb_connection(self):
        """
        This is analogous to other backends' `_nodb_connection` property,
        which allows access to an "administrative" connection which can
        be used to manage the test databases.
        For Oracle, the only connection that can be used for that purpose
        is the main (non-test) connection.
        """
        settings_dict = settings.DATABASES[self.connection.alias]
        user = settings_dict.get('SAVED_USER') or settings_dict['USER']
        password = settings_dict.get('SAVED_PASSWORD') or settings_dict['PASSWORD']
        settings_dict = settings_dict.copy()
        settings_dict.update(USER=user, PASSWORD=password)
        DatabaseWrapper = type(self.connection)
        return DatabaseWrapper(settings_dict, alias=self.connection.alias)
    def _create_test_db(self, verbosity=1, autoclobber=False, keepdb=False):
        """
        Create the test tablespaces and test user, prompting before
        destroying pre-existing ones unless `autoclobber`.  Switches the
        connection to the test user and returns the test database name.
        """
        parameters = self._get_test_db_params()
        cursor = self._maindb_connection.cursor()
        if self._test_database_create():
            try:
                self._execute_test_db_creation(cursor, parameters, verbosity, keepdb)
            except Exception as e:
                # if we want to keep the db, then no need to do any of the below,
                # just return and skip it all.
                if keepdb:
                    return
                sys.stderr.write("Got an error creating the test database: %s\n" % e)
                if not autoclobber:
                    confirm = input(
                        "It appears the test database, %s, already exists. "
                        "Type 'yes' to delete it, or 'no' to cancel: " % parameters['user'])
                if autoclobber or confirm == 'yes':
                    if verbosity >= 1:
                        print("Destroying old test database for alias '%s'..." % self.connection.alias)
                    try:
                        self._execute_test_db_destruction(cursor, parameters, verbosity)
                    except DatabaseError as e:
                        # ORA-29857: tablespace contains objects and can't be dropped directly.
                        if 'ORA-29857' in str(e):
                            self._handle_objects_preventing_db_destruction(cursor, parameters,
                                                                           verbosity, autoclobber)
                        else:
                            # Ran into a database error that isn't about leftover objects in the tablespace
                            sys.stderr.write("Got an error destroying the old test database: %s\n" % e)
                            sys.exit(2)
                    except Exception as e:
                        sys.stderr.write("Got an error destroying the old test database: %s\n" % e)
                        sys.exit(2)
                    try:
                        self._execute_test_db_creation(cursor, parameters, verbosity, keepdb)
                    except Exception as e:
                        sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                        sys.exit(2)
                else:
                    print("Tests cancelled.")
                    sys.exit(1)
        if self._test_user_create():
            if verbosity >= 1:
                print("Creating test user...")
            try:
                self._create_test_user(cursor, parameters, verbosity, keepdb)
            except Exception as e:
                # If we want to keep the db, then we want to also keep the user.
                if keepdb:
                    return
                sys.stderr.write("Got an error creating the test user: %s\n" % e)
                if not autoclobber:
                    confirm = input(
                        "It appears the test user, %s, already exists. Type "
                        "'yes' to delete it, or 'no' to cancel: " % parameters['user'])
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print("Destroying old test user...")
                        self._destroy_test_user(cursor, parameters, verbosity)
                        if verbosity >= 1:
                            print("Creating test user...")
                        self._create_test_user(cursor, parameters, verbosity, keepdb)
                    except Exception as e:
                        sys.stderr.write("Got an error recreating the test user: %s\n" % e)
                        sys.exit(2)
                else:
                    print("Tests cancelled.")
                    sys.exit(1)
        self._maindb_connection.close()  # done with main user -- test user and tablespaces created
        self._switch_to_test_user(parameters)
        return self.connection.settings_dict['NAME']
    def _switch_to_test_user(self, parameters):
        """
        Oracle doesn't have the concept of separate databases under the same user.
        Thus, we use a separate user (see _create_test_db). This method is used
        to switch to that user. We will need the main user again for clean-up when
        we end testing, so we keep its credentials in SAVED_USER/SAVED_PASSWORD
        entries in the settings dict.
        """
        real_settings = settings.DATABASES[self.connection.alias]
        real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = \
            self.connection.settings_dict['USER']
        real_settings['SAVED_PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] = \
            self.connection.settings_dict['PASSWORD']
        real_test_settings = real_settings['TEST']
        test_settings = self.connection.settings_dict['TEST']
        real_test_settings['USER'] = real_settings['USER'] = test_settings['USER'] = \
            self.connection.settings_dict['USER'] = parameters['user']
        real_settings['PASSWORD'] = self.connection.settings_dict['PASSWORD'] = parameters['password']
    def set_as_test_mirror(self, primary_settings_dict):
        """
        Set this database up to be used in testing as a mirror of a primary database
        whose settings are given
        """
        self.connection.settings_dict['USER'] = primary_settings_dict['USER']
        self.connection.settings_dict['PASSWORD'] = primary_settings_dict['PASSWORD']
    def _handle_objects_preventing_db_destruction(self, cursor, parameters, verbosity, autoclobber):
        """
        Handle ORA-29857: leftover objects block dropping the tablespace.
        Offer to drop the test user (which likely owns them), then retry.
        """
        # There are objects in the test tablespace which prevent dropping it
        # The easy fix is to drop the test user -- but are we allowed to do so?
        print("There are objects in the old test database which prevent its destruction.")
        print("If they belong to the test user, deleting the user will allow the test "
              "database to be recreated.")
        print("Otherwise, you will need to find and remove each of these objects, "
              "or use a different tablespace.\n")
        if self._test_user_create():
            if not autoclobber:
                confirm = input("Type 'yes' to delete user %s: " % parameters['user'])
            if autoclobber or confirm == 'yes':
                try:
                    if verbosity >= 1:
                        print("Destroying old test user...")
                    self._destroy_test_user(cursor, parameters, verbosity)
                except Exception as e:
                    sys.stderr.write("Got an error destroying the test user: %s\n" % e)
                    sys.exit(2)
                try:
                    if verbosity >= 1:
                        print("Destroying old test database for alias '%s'..." % self.connection.alias)
                    self._execute_test_db_destruction(cursor, parameters, verbosity)
                except Exception as e:
                    sys.stderr.write("Got an error destroying the test database: %s\n" % e)
                    sys.exit(2)
            else:
                print("Tests cancelled -- test database cannot be recreated.")
                sys.exit(1)
        else:
            print("Django is configured to use pre-existing test user '%s',"
                  " and will not attempt to delete it.\n" % parameters['user'])
            print("Tests cancelled -- test database cannot be recreated.")
            sys.exit(1)
    def _destroy_test_db(self, test_database_name, verbosity=1):
        """
        Destroy a test database, prompting the user for confirmation if the
        database already exists. Returns the name of the test database created.
        """
        # Restore the main user's credentials saved by _switch_to_test_user().
        self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
        self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']
        self.connection.close()
        parameters = self._get_test_db_params()
        cursor = self._maindb_connection.cursor()
        time.sleep(1)  # To avoid "database is being accessed by other users" errors.
        if self._test_user_create():
            if verbosity >= 1:
                print('Destroying test user...')
            self._destroy_test_user(cursor, parameters, verbosity)
        if self._test_database_create():
            if verbosity >= 1:
                print('Destroying test database tables...')
            self._execute_test_db_destruction(cursor, parameters, verbosity)
        self._maindb_connection.close()
    def _execute_test_db_creation(self, cursor, parameters, verbosity, keepdb=False):
        """Create the main and temporary test tablespaces."""
        if verbosity >= 2:
            print("_create_test_db(): dbname = %s" % parameters['user'])
        statements = [
            """CREATE TABLESPACE %(tblspace)s
            DATAFILE '%(datafile)s' SIZE 20M
            REUSE AUTOEXTEND ON NEXT 10M MAXSIZE %(maxsize)s
            """,
            """CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
            TEMPFILE '%(datafile_tmp)s' SIZE 20M
            REUSE AUTOEXTEND ON NEXT 10M MAXSIZE %(maxsize_tmp)s
            """,
        ]
        # Ignore "tablespace already exists" error when keepdb is on.
        acceptable_ora_err = 'ORA-01543' if keepdb else None
        self._execute_allow_fail_statements(cursor, statements, parameters, verbosity, acceptable_ora_err)
    def _create_test_user(self, cursor, parameters, verbosity, keepdb=False):
        """Create the test user and grant it the privileges the test suite needs."""
        if verbosity >= 2:
            print("_create_test_user(): username = %s" % parameters['user'])
        statements = [
            """CREATE USER %(user)s
               IDENTIFIED BY "%(password)s"
               DEFAULT TABLESPACE %(tblspace)s
               TEMPORARY TABLESPACE %(tblspace_temp)s
               QUOTA UNLIMITED ON %(tblspace)s
            """,
            """GRANT CREATE SESSION,
                     CREATE TABLE,
                     CREATE SEQUENCE,
                     CREATE PROCEDURE,
                     CREATE TRIGGER
               TO %(user)s""",
        ]
        # Ignore "user already exists" error when keepdb is on
        acceptable_ora_err = 'ORA-01920' if keepdb else None
        success = self._execute_allow_fail_statements(cursor, statements, parameters, verbosity, acceptable_ora_err)
        # If the password was randomly generated, change the user accordingly.
        if not success and self._test_settings_get('PASSWORD') is None:
            set_password = 'ALTER USER %(user)s IDENTIFIED BY "%(password)s"'
            self._execute_statements(cursor, [set_password], parameters, verbosity)
        # Most test-suites can be run without the create-view privilege. But some need it.
        extra = "GRANT CREATE VIEW TO %(user)s"
        success = self._execute_allow_fail_statements(cursor, [extra], parameters, verbosity, 'ORA-01031')
        if not success and verbosity >= 2:
            print("Failed to grant CREATE VIEW permission to test user. This may be ok.")
    def _execute_test_db_destruction(self, cursor, parameters, verbosity):
        """Drop both test tablespaces, including their contents and datafiles."""
        if verbosity >= 2:
            print("_execute_test_db_destruction(): dbname=%s" % parameters['user'])
        statements = [
            'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
            'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)
    def _destroy_test_user(self, cursor, parameters, verbosity):
        """Drop the test user and everything it owns (CASCADE)."""
        if verbosity >= 2:
            print("_destroy_test_user(): user=%s" % parameters['user'])
            print("Be patient. This can take some time...")
        statements = [
            'DROP USER %(user)s CASCADE',
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)
    def _execute_statements(self, cursor, statements, parameters, verbosity, allow_quiet_fail=False):
        """Interpolate `parameters` into each statement template and execute it."""
        for template in statements:
            stmt = template % parameters
            if verbosity >= 2:
                print(stmt)
            try:
                cursor.execute(stmt)
            except Exception as err:
                if (not allow_quiet_fail) or verbosity >= 2:
                    sys.stderr.write("Failed (%s)\n" % (err))
                raise
    def _execute_allow_fail_statements(self, cursor, statements, parameters, verbosity, acceptable_ora_err):
        """
        Execute statements which are allowed to fail silently if the Oracle
        error code given by `acceptable_ora_err` is raised. Return True if the
        statements execute without an exception, or False otherwise.
        """
        try:
            # Statement can fail when acceptable_ora_err is not None
            allow_quiet_fail = acceptable_ora_err is not None and len(acceptable_ora_err) > 0
            self._execute_statements(cursor, statements, parameters, verbosity, allow_quiet_fail=allow_quiet_fail)
            return True
        except DatabaseError as err:
            description = str(err)
            if acceptable_ora_err is None or acceptable_ora_err not in description:
                raise
            return False
    def _get_test_db_params(self):
        """Collect every name/credential/size needed by the DDL templates above."""
        return {
            'dbname': self._test_database_name(),
            'user': self._test_database_user(),
            'password': self._test_database_passwd(),
            'tblspace': self._test_database_tblspace(),
            'tblspace_temp': self._test_database_tblspace_tmp(),
            'datafile': self._test_database_tblspace_datafile(),
            'datafile_tmp': self._test_database_tblspace_tmp_datafile(),
            'maxsize': self._test_database_tblspace_size(),
            'maxsize_tmp': self._test_database_tblspace_tmp_size(),
        }
    def _test_settings_get(self, key, default=None, prefixed=None):
        """
        Return a value from the test settings dict,
        or a given default,
        or a prefixed entry from the main settings dict
        """
        settings_dict = self.connection.settings_dict
        val = settings_dict['TEST'].get(key, default)
        if val is None and prefixed:
            val = TEST_DATABASE_PREFIX + settings_dict[prefixed]
        return val
    def _test_database_name(self):
        return self._test_settings_get('NAME', prefixed='NAME')
    def _test_database_create(self):
        # Whether to create the tablespaces (TEST['CREATE_DB'], default True).
        return self._test_settings_get('CREATE_DB', default=True)
    def _test_user_create(self):
        # Whether to create the test user (TEST['CREATE_USER'], default True).
        return self._test_settings_get('CREATE_USER', default=True)
    def _test_database_user(self):
        return self._test_settings_get('USER', prefixed='USER')
    def _test_database_passwd(self):
        password = self._test_settings_get('PASSWORD')
        if password is None and self._test_user_create():
            # Oracle passwords are limited to 30 chars and can't contain symbols.
            password = get_random_string(length=30)
        return password
    def _test_database_tblspace(self):
        return self._test_settings_get('TBLSPACE', prefixed='USER')
    def _test_database_tblspace_tmp(self):
        settings_dict = self.connection.settings_dict
        return settings_dict['TEST'].get('TBLSPACE_TMP',
                                         TEST_DATABASE_PREFIX + settings_dict['USER'] + '_temp')
    def _test_database_tblspace_datafile(self):
        tblspace = '%s.dbf' % self._test_database_tblspace()
        return self._test_settings_get('DATAFILE', default=tblspace)
    def _test_database_tblspace_tmp_datafile(self):
        tblspace = '%s.dbf' % self._test_database_tblspace_tmp()
        return self._test_settings_get('DATAFILE_TMP', default=tblspace)
    def _test_database_tblspace_size(self):
        return self._test_settings_get('DATAFILE_MAXSIZE', default='500M')
    def _test_database_tblspace_tmp_size(self):
        return self._test_settings_get('DATAFILE_TMP_MAXSIZE', default='500M')
    def _get_test_db_name(self):
        """
        We need to return the 'production' DB name to get the test DB creation
        machinery to work. This isn't a great deal in this case because DB
        names as handled by Django haven't real counterparts in Oracle.
        """
        return self.connection.settings_dict['NAME']
    def test_db_signature(self):
        """Tuple identifying this test database, used to detect clashes/mirrors."""
        settings_dict = self.connection.settings_dict
        return (
            settings_dict['HOST'],
            settings_dict['PORT'],
            settings_dict['ENGINE'],
            settings_dict['NAME'],
            self._test_database_user(),
        )
| gpl-3.0 |
SOKP/kernel_samsung_espresso10 | tools/perf/python/twatch.py | 3213 | 1338 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    # Watch task/comm events for all CPUs and all currently-existing threads.
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    # NOTE(review): SAMPLE_TID appears twice in sample_type below; the second
    # occurrence looks like it was meant to be a different flag (perhaps
    # SAMPLE_TIME) -- confirm against the perf python binding before changing.
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, sample_period = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    evsel.open(cpus = cpus, threads = threads);
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    # Event loop: block until events arrive, then drain each CPU's ring buffer.
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event
| gpl-2.0 |
uqtimes/Cocos2d-x_Study_20140127 | Cocos2d-x_3D/cocos2d/tools/gen-prebuilt/excopy.py | 94 | 3283 | #!/usr/bin/python
# ----------------------------------------------------------------------------
# extend methods for copy files/dirs
#
# Copyright 2014 (C) zhangbin
#
# License: MIT
# ----------------------------------------------------------------------------
import os
import shutil
def copy_files_in_dir(src, dst):
    """Recursively copy everything under `src` into `dst` (dst must already exist)."""
    for entry in os.listdir(src):
        src_path = os.path.join(src, entry)
        if os.path.isfile(src_path):
            shutil.copy(src_path, dst)
        elif os.path.isdir(src_path):
            dst_path = os.path.join(dst, entry)
            if not os.path.isdir(dst_path):
                os.makedirs(dst_path)
            copy_files_in_dir(src_path, dst_path)
def copy_files_with_config(config, src_root, dst_root):
    """Copy files as described by `config`: a dict with "from"/"to" sub-paths
    and optional "include"/"exclude" glob-style rule lists.
    """
    src_dir = os.path.join(src_root, config["from"])
    dst_dir = os.path.join(dst_root, config["to"])
    include_rules = None
    # `key in dict` instead of dict.has_key(): has_key() was removed in
    # Python 3, while `in` works on both Python 2 and 3.
    if "include" in config:
        include_rules = convert_rules(config["include"])
    exclude_rules = None
    if "exclude" in config:
        exclude_rules = convert_rules(config["exclude"])
    copy_files_with_rules(src_dir, src_dir, dst_dir, include_rules, exclude_rules)
def copy_files_with_rules(src_rootDir, src, dst, include = None, exclude = None):
    """Copy `src` into `dst`, honouring optional include/exclude regex rules.

    Rules match each file's path relative to `src_rootDir` (forward slashes).
    With `include`, only matching files are copied; with `exclude`, matching
    files are skipped.  `include` wins if both are supplied.
    """
    if os.path.isfile(src):
        if not os.path.exists(dst):
            os.makedirs(dst)
        shutil.copy(src, dst)
        return
    if include is None and exclude is None:
        # No filtering at all: bulk-copy the whole tree.
        if not os.path.exists(dst):
            os.makedirs(dst)
        copy_files_in_dir(src, dst)
        return
    # Unify the include and exclude branches: `selecting` is True when a
    # rule match means "copy", False when it means "skip".
    selecting = include is not None
    rules = include if selecting else exclude
    for entry in os.listdir(src):
        abs_path = os.path.join(src, entry)
        rel_path = os.path.relpath(abs_path, src_rootDir)
        if os.path.isdir(abs_path):
            child_dst = os.path.join(dst, entry)
            if selecting:
                copy_files_with_rules(src_rootDir, abs_path, child_dst, include = include)
            else:
                copy_files_with_rules(src_rootDir, abs_path, child_dst, exclude = exclude)
        elif os.path.isfile(abs_path):
            if _in_rules(rel_path, rules) == selecting:
                if not os.path.exists(dst):
                    os.makedirs(dst)
                shutil.copy(abs_path, dst)
def _in_rules(rel_path, rules):
import re
ret = False
path_str = rel_path.replace("\\", "/")
for rule in rules:
if re.match(rule, path_str):
ret = True
return ret
def convert_rules(rules):
    """Translate glob-style rules to regex strings ('.' -> '\\.', '*' -> '.*')."""
    return [rule.replace('.', '\\.').replace('*', '.*') for rule in rules]
| mit |
thaumos/ansible | lib/ansible/module_utils/vexata.py | 37 | 3074 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, Sandeep Kasargod <sandeep@vexata.com>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
HAS_VEXATAPI = True
try:
from vexatapi.vexata_api_proxy import VexataAPIProxy
except ImportError:
HAS_VEXATAPI = False
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import env_fallback
VXOS_VERSION = None
def get_version(iocs_json):
    """Return the active IOC's firmware version as a tuple of ints.

    Expects entries shaped like {'mgmtRole': bool, 'swVersion': 'vX.Y.Z-...'};
    raises Exception on missing input, no active IOC, or a malformed version.
    """
    if not iocs_json:
        raise Exception('Invalid IOC json')
    # Materialize as a list: on Python 3 `filter()` returns a lazy iterator,
    # which is always truthy (breaking the emptiness check) and is not
    # subscriptable (breaking `active[0]`).
    active = [ioc for ioc in iocs_json if ioc['mgmtRole']]
    if not active:
        raise Exception('Unable to detect active IOC')
    ver = active[0]['swVersion']
    if ver[0] != 'v':
        raise Exception('Illegal version string')
    ver = ver[1:ver.find('-')]
    return tuple(int(part) for part in ver.split('.'))
def get_array(module):
    """Return a connected VexataAPIProxy for the configured array, or fail the module."""
    global VXOS_VERSION
    params = module.params
    array = params['array']
    user = params.get('user', None)
    password = params.get('password', None)
    validate = params.get('validate_certs')
    if not HAS_VEXATAPI:
        module.fail_json(msg='vexatapi library is required for this module. '
                             'To install, use `pip install vexatapi`')
    if user and password:
        system = VexataAPIProxy(array, user, password, verify_cert=validate)
    else:
        module.fail_json(msg='The user/password are required to be passed in to '
                             'the module as arguments or by setting the '
                             'VEXATA_USER and VEXATA_PASSWORD environment variables.')
    try:
        if not system.test_connection():
            module.fail_json(msg='Test connection to array failed.')
        else:
            # Cache the firmware version for later feature checks.
            VXOS_VERSION = get_version(system.iocs())
            return system
    except Exception as e:
        module.fail_json(msg='Vexata API access failed: {0}'.format(to_native(e)))
def argument_spec():
    """Return the base argument_spec dict shared by all Vexata modules."""
    return {
        'array': dict(type='str', required=True),
        'user': dict(type='str',
                     fallback=(env_fallback, ['VEXATA_USER'])),
        'password': dict(type='str', no_log=True,
                         fallback=(env_fallback, ['VEXATA_PASSWORD'])),
        'validate_certs': dict(type='bool', required=False, default=False),
    }
def required_together():
    """Return the default `required_together` constraint for AnsibleModule."""
    return [['user', 'password']]
def size_to_MiB(size):
    """Convert a '<integer>[MGT]' string to MiB, return -1 on error."""
    # Guard against '' and single-character strings: the original indexed
    # size[-1] unconditionally and raised IndexError on an empty string
    # instead of returning -1 as documented.
    if len(size) < 2:
        return -1
    quant = size[:-1]
    exponent = size[-1]
    if not quant.isdigit() or exponent not in 'MGT':
        return -1
    quant = int(quant)
    if exponent == 'G':
        quant <<= 10
    elif exponent == 'T':
        quant <<= 20
    return quant
| gpl-3.0 |
mrquim/mrquimrepo | repo/plugin.video.Rising.Tides/resources/modules/js2py/prototypes/jsregexp.py | 34 | 1251 |
class RegExpPrototype:
    # NOTE(review): js2py prototype class -- these methods deliberately take
    # no `self`; the translator machinery injects `this` and the arguments,
    # so the unusual signatures are intentional and must not be "fixed".
    def toString():
        # Rebuild the JavaScript /pattern/flags literal for this regexp.
        flags = u''
        if this.glob:
            flags += u'g'
        if this.ignore_case:
            flags += u'i'
        if this.multiline:
            flags += u'm'
        # An empty pattern renders as (?:) so the literal stays parseable.
        v = this.value if this.value else '(?:)'
        return u'/%s/'%v + flags
    def test(string):
        # RegExp.prototype.test: true iff exec() finds a match (non-null).
        return Exec(this, string) is not this.null
    def exec2(string): # will be changed to exec in base.py. cant name it exec here
        return Exec(this, string)
def Exec(this, string):
    """Core of RegExp.prototype.exec: scan `string` (from lastIndex when the
    global flag is set) and return a JS match array, or JS null on no match."""
    if this.Class!='RegExp':
        raise this.MakeError('TypeError', 'RegExp.prototype.exec is not generic!')
    string = string.to_string()
    length = len(string)
    # Global regexps resume from lastIndex; non-global ones always start at 0.
    i = this.get('lastIndex').to_int() if this.glob else 0
    matched = False
    while not matched:
        # Start position out of range: reset lastIndex and report no match.
        if i < 0 or i > length:
            this.put('lastIndex', this.Js(0))
            return this.null
        matched = this.match(string.value, i)
        i += 1
    # i was incremented once past the successful start, hence the -1 offsets
    # when translating the (relative) match span to absolute positions.
    start, end = matched.span()[0]+i-1, matched.span()[1]+i-1
    if this.glob:
        this.put('lastIndex', this.Js(end))
    # JS match array: full match followed by capture groups, plus the
    # `index` and `input` own-properties required by the spec.
    arr = this.Js([this.Js(e) for e in [matched.group()]+list(matched.groups())])
    arr.put('index', this.Js(start))
    arr.put('input', string)
    return arr
| gpl-2.0 |
boooka/GeoPowerOff | venv/lib/python2.7/site-packages/django/contrib/sessions/backends/cached_db.py | 67 | 2846 | """
Cached, database-backed sessions.
"""
import logging
from django.conf import settings
from django.contrib.sessions.backends.db import SessionStore as DBStore
from django.core.cache import caches
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.encoding import force_text
KEY_PREFIX = "django.contrib.sessions.cached_db"
class SessionStore(DBStore):
    """
    Implements cached, database backed sessions.
    """
    def __init__(self, session_key=None):
        # Resolve the configured cache alias once per store instance.
        self._cache = caches[settings.SESSION_CACHE_ALIAS]
        super(SessionStore, self).__init__(session_key)
    @property
    def cache_key(self):
        # Namespaced key so session entries can't collide with other cache
        # users; creates a session key on first access if one doesn't exist.
        return KEY_PREFIX + self._get_or_create_session_key()
    def load(self):
        """Return the session dict, preferring the cache over the database."""
        try:
            data = self._cache.get(self.cache_key, None)
        except Exception:
            # Some backends (e.g. memcache) raise an exception on invalid
            # cache keys. If this happens, reset the session. See #17810.
            data = None
        if data is None:
            # Duplicate DBStore.load, because we need to keep track
            # of the expiry date to set it properly in the cache.
            try:
                s = Session.objects.get(
                    session_key=self.session_key,
                    expire_date__gt=timezone.now()
                )
                data = self.decode(s.session_data)
                self._cache.set(self.cache_key, data,
                                self.get_expiry_age(expiry=s.expire_date))
            except (Session.DoesNotExist, SuspiciousOperation) as e:
                if isinstance(e, SuspiciousOperation):
                    logger = logging.getLogger('django.security.%s' %
                                               e.__class__.__name__)
                    logger.warning(force_text(e))
                self.create()
                data = {}
        return data
    def exists(self, session_key):
        # Fast path: a cache hit proves existence without hitting the DB.
        if (KEY_PREFIX + session_key) in self._cache:
            return True
        return super(SessionStore, self).exists(session_key)
    def save(self, must_create=False):
        # Write-through: persist to the database first, then refresh cache.
        super(SessionStore, self).save(must_create)
        self._cache.set(self.cache_key, self._session, self.get_expiry_age())
    def delete(self, session_key=None):
        # Delete from the DB first, then evict the cached copy.
        super(SessionStore, self).delete(session_key)
        if session_key is None:
            if self.session_key is None:
                return
            session_key = self.session_key
        self._cache.delete(KEY_PREFIX + session_key)
    def flush(self):
        """
        Removes the current session data from the database and regenerates the
        key.
        """
        self.clear()
        self.delete(self.session_key)
        self.create()
# At bottom to avoid circular import
from django.contrib.sessions.models import Session
| apache-2.0 |
JohnUrban/fast5tools | bin/samGenomicWindows.py | 1 | 5426 | #!/usr/bin/env python2.7
import argparse
from collections import defaultdict
from fast5tools.samclass import *
from fast5tools.samops import *
parser = argparse.ArgumentParser(description="""
DESCRIPTION
Given a SAM file (with F5:Z: info attached) that is sorted by read name:
- get the alignment or set of splitread alignments for each read
- determine most likely genomic region read came from (assuming no structural variation)
- if one alignment, assume it comes from there
- if multiple alignments,
check for overlap of their individual genomic windows (alignment adjusted for clipping on each side + flank/buffer)
if no merges,
use majority or longest alignment (majority is longest alignment that also meets a majority threshold)
if there is a single merge -- i.e. they all come from same genomic region (and perhaps required to be ordered and stranded - see options) -
use merged result from merged genomic windows
if there is 1 or more merges (but still more than 1 genomic region)
see if longest merge has a 'spanning alignment' longer than longest/majority alignment
if so use that, if not use the longest/majority alignment
- report on alignments and merges in all cases
- get coordinates for a window that surrounds that chosen genomic region
- this is the chosen genomic window for that read
- coordinates for genomic window should be proportional to read length + some extra buffering/flanking sequence
- print out gw coordinates, notes on choice, F5 info, and perhaps genomic sequence chosen
flank=0.25, merge_dist=0, majority=0.5, require_order=False, require_strand=False, reference=False
flank = Add buffer/flank lengths to each side of a genomic window in two ways:
(1) int > 1 adds/subtracts that int.
(2) float [0,1] adds/subtracts that proportion of read length
NOTE: 1 defaults to 100% of read length, not 1 bp
merge_dist:
allows a gap up to d between intervals to still be an overlap - default 0
majority
threshold to exceed to be considered a majority.
require_order
when True, alignments must be ordered as they appear in the read to be considered a valid merge.
Defaults to False as noisy alignments could easily break this. Status is reported in output anyway.
require_strand
when True, alignments must ALL be on the same strand to be considered a valid merge.
Defaults to False as noisy alignments could easily break this. Status is reported in output anyway.
""", formatter_class= argparse.RawTextHelpFormatter)
parser_input = parser.add_mutually_exclusive_group(required=True)
parser_input.add_argument('--sam', '-s',
type= str, default=False,
help='''Input file in SAM format.''')
## FOR NOW, MUST BE SAM -- NOT BAM -- but can be STDIN SAM
##parser_input.add_argument('--bam', '-b',
## type= str, default=False,
## help='''Input file in BAM format.''')
parser.add_argument('--flank', '-f', type=float, default=0.25,
help=''' ''')
parser.add_argument('--merge_dist', '-m', type=int, default=0,
help=''' ''')
parser.add_argument('--majority', '-M', type=float, default=0.5,
help=''' ''')
parser.add_argument('--require_order', '-ro', action='store_true', default=False,
help=''' ''')
parser.add_argument('--require_strand', '-rs', action='store_true', default=False,
help=''' ''')
parser.add_argument('--reference', '-r', type=str, default=False,
help=''' Path to reference genome file to be used to extract sequences corresponding to genomic windows identified.
Optional. Sequences will be tagged on to an additional end column if provided.''')
parser.add_argument('--getF5info', '-f5', action='store_true', default=False,
help='''Return F5:Z: field from fast5tools in output.
This is from extracting fasta/fastq using fast5tofastx.py with --comments and --samflag''')
parser.add_argument('--getBCinfo', '-BC', action='store_true', default=False,
help=''' Return BC:Z: field from fast5tools in output.
This is from creating fasta/fastq from output of fast5_sw_bardecoder.py specified with --sequence/--quals,
and merging all desired barcode info into string following BC:Z:''')
parser.add_argument('--do_not_adjust_window_for_clipping', '-noadjust', action='store_true', default=False,
help=''' By default, the genomic window is pushed out at least as far as it would need to be to include soft/hard clipped regions at 5'/3' ends. This turns it off.''')
args = parser.parse_args()
get_genomic_windows(samfilepath=args.sam, flank=args.flank, merge_dist=args.merge_dist, majority=args.majority, require_order=args.require_order, require_strand=args.require_strand, reference=args.reference, getF5field=args.getF5info, getBCfield=args.getBCinfo, adjust_for_clipping_in_output=(not args.do_not_adjust_window_for_clipping))
| mit |
nchammas/spark | python/pyspark/ml/util.py | 7 | 22026 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import time
import uuid
from pyspark import SparkContext, since
from pyspark.ml.common import inherit_doc
from pyspark.sql import SparkSession
from pyspark.util import VersionUtils
def _jvm():
    """
    Return the JVM view associated with the SparkContext.

    Must be called after the SparkContext has been initialized; otherwise the
    py4j gateway does not exist yet.

    Raises
    ------
    AttributeError
        If the SparkContext has not been initialized.
    """
    gateway_view = SparkContext._jvm
    if not gateway_view:
        raise AttributeError("Cannot load _jvm from SparkContext. Is SparkContext initialized?")
    return gateway_view
class Identifiable(object):
    """
    Mixin giving every instance a unique string identifier.

    The identifier is generated once at construction time and doubles as the
    instance's ``repr``.
    """

    def __init__(self):
        #: A unique id for the object.
        self.uid = self._randomUID()

    def __repr__(self):
        return self.uid

    @classmethod
    def _randomUID(cls):
        """
        Generate a unique string id for the object: the class name, an
        underscore, and 12 random hex chars.
        """
        suffix = uuid.uuid4().hex[-12:]
        return str("%s_%s" % (cls.__name__, suffix))
@inherit_doc
class BaseReadWrite(object):
    """
    Common base for :py:class:`MLWriter` and :py:class:`MLReader`; tracks the
    SparkSession (and derived SparkContext) used for persistence.

    .. versionadded:: 2.3.0
    """

    def __init__(self):
        # ``None`` means "resolve the default session lazily on first use".
        self._sparkSession = None

    def session(self, sparkSession):
        """
        Sets the Spark Session to use for saving/loading.
        """
        self._sparkSession = sparkSession
        return self

    @property
    def sparkSession(self):
        """
        Returns the user-specified Spark Session or the default.
        """
        session = self._sparkSession
        if session is None:
            session = SparkSession.builder.getOrCreate()
            self._sparkSession = session
        return session

    @property
    def sc(self):
        """
        Returns the underlying `SparkContext`.
        """
        return self.sparkSession.sparkContext
@inherit_doc
class MLWriter(BaseReadWrite):
    """
    Utility class that can save ML instances.

    .. versionadded:: 2.0.0
    """

    def __init__(self):
        super(MLWriter, self).__init__()
        # Whether save() is allowed to clobber an existing path.
        self.shouldOverwrite = False
        # Lower-cased option name -> stringified value.
        self.optionMap = {}

    def _handleOverwrite(self, path):
        # Delegate deletion of any existing output to the JVM-side helper.
        from pyspark.ml.wrapper import JavaWrapper
        java_helper = JavaWrapper._new_java_obj("org.apache.spark.ml.util.FileSystemOverwrite")
        JavaWrapper(java_helper)._call_java("handleOverwrite", path, True,
                                            self.sparkSession._jsparkSession)

    def save(self, path):
        """Save the ML instance to the input path."""
        if self.shouldOverwrite:
            self._handleOverwrite(path)
        self.saveImpl(path)

    def saveImpl(self, path):
        """
        save() handles overwriting and then calls this method. Subclasses should override this
        method to implement the actual saving of the instance.
        """
        raise NotImplementedError("MLWriter is not yet implemented for type: %s" % type(self))

    def overwrite(self):
        """Overwrites if the output path already exists."""
        self.shouldOverwrite = True
        return self

    def option(self, key, value):
        """
        Adds an option to the underlying MLWriter. See the documentation for the specific model's
        writer for possible options. The option name (key) is case-insensitive.
        """
        self.optionMap[key.lower()] = str(value)
        return self
@inherit_doc
class GeneralMLWriter(MLWriter):
    """
    Utility class that can save ML instances in different formats.
    .. versionadded:: 2.4.0
    """
    def format(self, source):
        """
        Specifies the format of ML export (e.g. "pmml", "internal", or the fully qualified class
        name for export).
        """
        # Stored on the writer; the backend consults it when the save runs.
        self.source = source
        return self
@inherit_doc
class JavaMLWriter(MLWriter):
    """
    (Private) Specialization of :py:class:`MLWriter` for :py:class:`JavaParams` types
    """

    def __init__(self, instance):
        super(JavaMLWriter, self).__init__()
        # JVM-side writer backing every operation of this Python writer.
        self._jwrite = instance._to_java().write()

    def save(self, path):
        """Save the ML instance to the input path."""
        if not isinstance(path, str):
            raise TypeError("path should be a string, got type %s" % type(path))
        self._jwrite.save(path)

    def overwrite(self):
        """Overwrites if the output path already exists."""
        self._jwrite.overwrite()
        return self

    def option(self, key, value):
        # Forwarded verbatim; the JVM side handles option semantics.
        self._jwrite.option(key, value)
        return self

    def session(self, sparkSession):
        """Sets the Spark Session to use for saving."""
        self._jwrite.session(sparkSession._jsparkSession)
        return self
@inherit_doc
class GeneralJavaMLWriter(JavaMLWriter):
    """
    (Private) Specialization of :py:class:`GeneralMLWriter` for :py:class:`JavaParams` types
    """
    def __init__(self, instance):
        super(GeneralJavaMLWriter, self).__init__(instance)
    def format(self, source):
        """
        Specifies the format of ML export (e.g. "pmml", "internal", or the fully qualified class
        name for export).
        """
        # Delegate to the JVM-side writer; return self to allow chaining.
        self._jwrite.format(source)
        return self
@inherit_doc
class MLWritable(object):
    """
    Mixin for ML instances that provide :py:class:`MLWriter`.

    .. versionadded:: 2.0.0
    """

    def write(self):
        """Returns an MLWriter instance for this ML instance."""
        # Concrete subclasses (e.g. JavaMLWritable) supply the writer.
        raise NotImplementedError("MLWritable is not yet implemented for type: %r" % type(self))

    def save(self, path):
        """Save this ML instance to the given path, a shortcut of 'write().save(path)'."""
        writer = self.write()
        writer.save(path)
@inherit_doc
class JavaMLWritable(MLWritable):
    """
    (Private) Mixin for ML instances that provide :py:class:`JavaMLWriter`.
    """
    def write(self):
        """Returns an MLWriter instance for this ML instance."""
        # The writer wraps this instance's JVM counterpart (see JavaMLWriter).
        return JavaMLWriter(self)
@inherit_doc
class GeneralJavaMLWritable(JavaMLWritable):
    """
    (Private) Mixin for ML instances that provide :py:class:`GeneralJavaMLWriter`.
    """
    def write(self):
        """Returns a GeneralJavaMLWriter instance for this ML instance."""
        return GeneralJavaMLWriter(self)
@inherit_doc
class MLReader(BaseReadWrite):
    """
    Utility class that can load ML instances.
    .. versionadded:: 2.0.0
    """
    def __init__(self):
        super(MLReader, self).__init__()
    def load(self, path):
        """Load the ML instance from the input path."""
        # Abstract: subclasses must override with the concrete loader.
        raise NotImplementedError("MLReader is not yet implemented for type: %s" % type(self))
@inherit_doc
class JavaMLReader(MLReader):
    """
    (Private) Specialization of :py:class:`MLReader` for :py:class:`JavaParams` types
    """
    def __init__(self, clazz):
        super(JavaMLReader, self).__init__()
        # Python class to reconstruct; its JVM peer provides the reader.
        self._clazz = clazz
        self._jread = self._load_java_obj(clazz).read()
    def load(self, path):
        """Load the ML instance from the input path."""
        if not isinstance(path, str):
            raise TypeError("path should be a string, got type %s" % type(path))
        java_obj = self._jread.load(path)
        # The Python class must know how to wrap its JVM counterpart.
        if not hasattr(self._clazz, "_from_java"):
            raise NotImplementedError("This Java ML type cannot be loaded into Python currently: %r"
                                      % self._clazz)
        return self._clazz._from_java(java_obj)
    def session(self, sparkSession):
        """Sets the Spark Session to use for loading."""
        self._jread.session(sparkSession._jsparkSession)
        return self
    @classmethod
    def _java_loader_class(cls, clazz):
        """
        Returns the full class name of the Java ML instance. The default
        implementation replaces "pyspark" by "org.apache.spark" in
        the Python full class name.
        """
        java_package = clazz.__module__.replace("pyspark", "org.apache.spark")
        if clazz.__name__ in ("Pipeline", "PipelineModel"):
            # Remove the last package name "pipeline" for Pipeline and PipelineModel.
            java_package = ".".join(java_package.split(".")[0:-1])
        return java_package + "." + clazz.__name__
    @classmethod
    def _load_java_obj(cls, clazz):
        """Load the peer Java object of the ML instance."""
        java_class = cls._java_loader_class(clazz)
        # Walk the JVM gateway attribute-by-attribute to reach the class.
        java_obj = _jvm()
        for name in java_class.split("."):
            java_obj = getattr(java_obj, name)
        return java_obj
@inherit_doc
class MLReadable(object):
    """
    Mixin for instances that provide :py:class:`MLReader`.

    .. versionadded:: 2.0.0
    """

    @classmethod
    def read(cls):
        """Returns an MLReader instance for this class."""
        # Concrete subclasses (e.g. JavaMLReadable) supply the reader.
        raise NotImplementedError("MLReadable.read() not implemented for type: %r" % cls)

    @classmethod
    def load(cls, path):
        """Reads an ML instance from the input path, a shortcut of `read().load(path)`."""
        reader = cls.read()
        return reader.load(path)
@inherit_doc
class JavaMLReadable(MLReadable):
    """
    (Private) Mixin for instances that provide JavaMLReader.
    """
    @classmethod
    def read(cls):
        """Returns an MLReader instance for this class."""
        # The reader resolves and wraps the JVM peer of ``cls``.
        return JavaMLReader(cls)
@inherit_doc
class DefaultParamsWritable(MLWritable):
    """
    Helper trait for making simple :py:class:`Params` types writable. If a :py:class:`Params`
    class stores all data as :py:class:`Param` values, then extending this trait will provide
    a default implementation of writing saved instances of the class.
    This only handles simple :py:class:`Param` types; e.g., it will not handle
    :py:class:`Dataset`. See :py:class:`DefaultParamsReadable`, the counterpart to this trait.
    .. versionadded:: 2.3.0
    """
    def write(self):
        """Returns a DefaultParamsWriter instance for this class.

        Raises
        ------
        TypeError
            If this instance does not extend :py:class:`Params` — only Param
            values can be serialized by the default writer.
        """
        from pyspark.ml.param import Params
        if isinstance(self, Params):
            return DefaultParamsWriter(self)
        else:
            # Bug fix: the message was previously passed as a second positional
            # argument to TypeError, so the "%s" placeholder was never
            # substituted (and "DefautParamsWritable" was misspelled).
            raise TypeError("Cannot use DefaultParamsWritable with type %s because it does not "
                            "extend Params." % type(self))
@inherit_doc
class DefaultParamsWriter(MLWriter):
    """
    Specialization of :py:class:`MLWriter` for :py:class:`Params` types
    Class for writing Estimators and Transformers whose parameters are JSON-serializable.
    .. versionadded:: 2.3.0
    """
    def __init__(self, instance):
        super(DefaultParamsWriter, self).__init__()
        # The Params instance whose metadata will be written.
        self.instance = instance
    def saveImpl(self, path):
        # Params-only types carry no data beyond metadata, so this is the
        # whole of save().
        DefaultParamsWriter.saveMetadata(self.instance, path, self.sc)
    @staticmethod
    def extractJsonParams(instance, skipParams):
        """Return {param name: value} for all set params not listed in skipParams."""
        paramMap = instance.extractParamMap()
        jsonParams = {param.name: value for param, value in paramMap.items()
                      if param.name not in skipParams}
        return jsonParams
    @staticmethod
    def saveMetadata(instance, path, sc, extraMetadata=None, paramMap=None):
        """
        Saves metadata + Params to: path + "/metadata"
        - class
        - timestamp
        - sparkVersion
        - uid
        - paramMap
        - defaultParamMap (since 2.4.0)
        - (optionally, extra metadata)
        Parameters
        ----------
        extraMetadata : dict, optional
            Extra metadata to be saved at same level as uid, paramMap, etc.
        paramMap : dict, optional
            If given, this is saved in the "paramMap" field.
        """
        metadataPath = os.path.join(path, "metadata")
        metadataJson = DefaultParamsWriter._get_metadata_to_save(instance,
                                                                 sc,
                                                                 extraMetadata,
                                                                 paramMap)
        # Single partition so the metadata lands in one file.
        sc.parallelize([metadataJson], 1).saveAsTextFile(metadataPath)
    @staticmethod
    def _get_metadata_to_save(instance, sc, extraMetadata=None, paramMap=None):
        """
        Helper for :py:meth:`DefaultParamsWriter.saveMetadata` which extracts the JSON to save.
        This is useful for ensemble models which need to save metadata for many sub-models.
        Notes
        -----
        See :py:meth:`DefaultParamsWriter.saveMetadata` for details on what this includes.
        """
        uid = instance.uid
        cls = instance.__module__ + '.' + instance.__class__.__name__
        # User-supplied param values
        params = instance._paramMap
        jsonParams = {}
        if paramMap is not None:
            # Caller-provided map wins over the instance's own param values.
            jsonParams = paramMap
        else:
            for p in params:
                jsonParams[p.name] = params[p]
        # Default param values
        jsonDefaultParams = {}
        for p in instance._defaultParamMap:
            jsonDefaultParams[p.name] = instance._defaultParamMap[p]
        basicMetadata = {"class": cls, "timestamp": int(round(time.time() * 1000)),
                         "sparkVersion": sc.version, "uid": uid, "paramMap": jsonParams,
                         "defaultParamMap": jsonDefaultParams}
        if extraMetadata is not None:
            basicMetadata.update(extraMetadata)
        # Compact separators keep the metadata file small.
        return json.dumps(basicMetadata, separators=[',', ':'])
@inherit_doc
class DefaultParamsReadable(MLReadable):
    """
    Helper trait for making simple :py:class:`Params` types readable.
    If a :py:class:`Params` class stores all data as :py:class:`Param` values,
    then extending this trait will provide a default implementation of reading saved
    instances of the class. This only handles simple :py:class:`Param` types;
    e.g., it will not handle :py:class:`Dataset`. See :py:class:`DefaultParamsWritable`,
    the counterpart to this trait.
    .. versionadded:: 2.3.0
    """
    @classmethod
    def read(cls):
        """Returns a DefaultParamsReader instance for this class."""
        return DefaultParamsReader(cls)
@inherit_doc
class DefaultParamsReader(MLReader):
    """
    Specialization of :py:class:`MLReader` for :py:class:`Params` types
    Default :py:class:`MLReader` implementation for transformers and estimators that
    contain basic (json-serializable) params and no data. This will not handle
    more complex params or types with data (e.g., models with coefficients).
    .. versionadded:: 2.3.0
    """
    def __init__(self, cls):
        super(DefaultParamsReader, self).__init__()
        # The Python class load() will instantiate.
        self.cls = cls
    @staticmethod
    def __get_class(clazz):
        """
        Loads Python class from its name.
        """
        parts = clazz.split('.')
        module = ".".join(parts[:-1])
        # __import__ returns the top-level package; walk down to the class.
        m = __import__(module)
        for comp in parts[1:]:
            m = getattr(m, comp)
        return m
    def load(self, path):
        """Instantiate the class named in the saved metadata and restore its params."""
        metadata = DefaultParamsReader.loadMetadata(path, self.sc)
        py_type = DefaultParamsReader.__get_class(metadata['class'])
        instance = py_type()
        instance._resetUid(metadata['uid'])
        DefaultParamsReader.getAndSetParams(instance, metadata)
        return instance
    @staticmethod
    def loadMetadata(path, sc, expectedClassName=""):
        """
        Load metadata saved using :py:meth:`DefaultParamsWriter.saveMetadata`
        Parameters
        ----------
        path : str
        sc : :py:class:`pyspark.SparkContext`
        expectedClassName : str, optional
            If non empty, this is checked against the loaded metadata.
        """
        metadataPath = os.path.join(path, "metadata")
        # Metadata is a single JSON document on one line (see saveMetadata).
        metadataStr = sc.textFile(metadataPath, 1).first()
        loadedVals = DefaultParamsReader._parseMetaData(metadataStr, expectedClassName)
        return loadedVals
    @staticmethod
    def _parseMetaData(metadataStr, expectedClassName=""):
        """
        Parse metadata JSON string produced by :py:meth:`DefaultParamsWriter._get_metadata_to_save`.
        This is a helper function for :py:meth:`DefaultParamsReader.loadMetadata`.
        Parameters
        ----------
        metadataStr : str
            JSON string of metadata
        expectedClassName : str, optional
            If non empty, this is checked against the loaded metadata.
        """
        metadata = json.loads(metadataStr)
        className = metadata['class']
        if len(expectedClassName) > 0:
            assert className == expectedClassName, "Error loading metadata: Expected " + \
                "class name {} but found class name {}".format(expectedClassName, className)
        return metadata
    @staticmethod
    def getAndSetParams(instance, metadata, skipParams=None):
        """
        Extract Params from metadata, and set them in the instance.
        """
        # Set user-supplied param values
        for paramName in metadata['paramMap']:
            param = instance.getParam(paramName)
            if skipParams is None or paramName not in skipParams:
                paramValue = metadata['paramMap'][paramName]
                instance.set(param, paramValue)
        # Set default param values
        majorAndMinorVersions = VersionUtils.majorMinorVersion(metadata['sparkVersion'])
        major = majorAndMinorVersions[0]
        minor = majorAndMinorVersions[1]
        # For metadata file prior to Spark 2.4, there is no default section.
        if major > 2 or (major == 2 and minor >= 4):
            assert 'defaultParamMap' in metadata, "Error loading metadata: Expected " + \
                "`defaultParamMap` section not found"
            for paramName in metadata['defaultParamMap']:
                paramValue = metadata['defaultParamMap'][paramName]
                instance._setDefault(**{paramName: paramValue})
    @staticmethod
    def isPythonParamsInstance(metadata):
        # Python-only types live under pyspark.ml.*; Java-backed ones use
        # org.apache.spark class names.
        return metadata['class'].startswith('pyspark.ml.')
    @staticmethod
    def loadParamsInstance(path, sc):
        """
        Load a :py:class:`Params` instance from the given path, and return it.
        This assumes the instance inherits from :py:class:`MLReadable`.
        """
        metadata = DefaultParamsReader.loadMetadata(path, sc)
        if DefaultParamsReader.isPythonParamsInstance(metadata):
            pythonClassName = metadata['class']
        else:
            pythonClassName = metadata['class'].replace("org.apache.spark", "pyspark")
        py_type = DefaultParamsReader.__get_class(pythonClassName)
        instance = py_type.load(path)
        return instance
@inherit_doc
class HasTrainingSummary(object):
    """
    Base class for models that provides Training summary.

    .. versionadded:: 3.0.0
    """

    @property
    @since("2.1.0")
    def hasSummary(self):
        """
        Indicates whether a training summary exists for this model
        instance.
        """
        return self._call_java("hasSummary")

    @property
    @since("2.1.0")
    def summary(self):
        """
        Gets summary of the model trained on the training set. An exception is thrown if
        no summary exists.
        """
        # Delegates to the JVM-side model, which raises if absent.
        return self._call_java("summary")
class MetaAlgorithmReadWrite:
    """
    Helpers for persistence of meta-estimators (Pipeline, OneVsRest,
    validators): detecting them and walking their nested stages.
    """

    @staticmethod
    def isMetaEstimator(pyInstance):
        """Return True if ``pyInstance`` is a meta-estimator wrapping other stages."""
        from pyspark.ml import Estimator, Pipeline
        from pyspark.ml.tuning import _ValidatorParams
        from pyspark.ml.classification import OneVsRest
        if isinstance(pyInstance, (Pipeline, OneVsRest)):
            return True
        return isinstance(pyInstance, Estimator) and isinstance(pyInstance, _ValidatorParams)

    @staticmethod
    def getAllNestedStages(pyInstance):
        """Return ``pyInstance`` followed by every transitively nested stage."""
        from pyspark.ml import Pipeline, PipelineModel
        from pyspark.ml.tuning import _ValidatorParams
        from pyspark.ml.classification import OneVsRest, OneVsRestModel
        # TODO: We need to handle `RFormulaModel.pipelineModel` here after Pyspark RFormulaModel
        # support pipelineModel property.
        if isinstance(pyInstance, Pipeline):
            children = pyInstance.getStages()
        elif isinstance(pyInstance, PipelineModel):
            children = pyInstance.stages
        elif isinstance(pyInstance, _ValidatorParams):
            raise ValueError('PySpark does not support nested validator.')
        elif isinstance(pyInstance, OneVsRest):
            children = [pyInstance.getClassifier()]
        elif isinstance(pyInstance, OneVsRestModel):
            children = [pyInstance.getClassifier()] + pyInstance.models
        else:
            children = []
        collected = [pyInstance]
        for child in children:
            collected.extend(MetaAlgorithmReadWrite.getAllNestedStages(child))
        return collected

    @staticmethod
    def getUidMap(instance):
        """Map uid -> stage over the instance and all nested stages; raise on duplicates."""
        stages = MetaAlgorithmReadWrite.getAllNestedStages(instance)
        uidMap = {stage.uid: stage for stage in stages}
        if len(stages) != len(uidMap):
            # Duplicate UIDs would make stage lookup during load ambiguous.
            raise RuntimeError(f'{instance.__class__.__module__}.{instance.__class__.__name__}'
                               f'.load found a compound estimator with stages with duplicate '
                               f'UIDs. List of UIDs: {list(uidMap.keys())}.')
        return uidMap
| apache-2.0 |
DigitalCampus/django-oppia | api/resources/course.py | 1 | 9945 | import json
import os
import re
import shutil
import xmltodict
import zipfile
from django.conf import settings
from django.conf.urls import url
from django.core.exceptions import MultipleObjectsReturned
from django.db.models import Q
from django.http import HttpResponse, Http404
from django.utils.translation import ugettext_lazy as _
from tastypie import fields
from tastypie.authentication import ApiKeyAuthentication, Authentication
from tastypie.authorization import ReadOnlyAuthorization, Authorization
from tastypie.resources import ModelResource
from tastypie.utils import trailing_slash
from api.serializers import CourseJSONSerializer
from oppia.models import Tracker, Course, CourseCategory
from oppia.signals import course_downloaded
STR_COURSE_NOT_FOUND = _(u"Course not found")
def get_course_from_shortname(resource, bundle, lookup):
    """
    Resolve a course by shortname through the resource's filter pipeline.

    Returns the filtered object list, which is expected to contain exactly
    one course. Raises the resource's ``DoesNotExist`` when nothing matches
    and ``MultipleObjectsReturned`` when the shortname is ambiguous.
    """
    object_list = resource.apply_filters(bundle.request,
                                         {'shortname': lookup})
    matches = len(object_list)
    if matches <= 0:
        # Fixed grammar in the error message ("an course" -> "a course").
        raise resource._meta.object_class.DoesNotExist(
            "Couldn't find a course with shortname '%s'." % (lookup))
    elif matches > 1:
        raise MultipleObjectsReturned(
            "More than one course with shortname '%s'." % (lookup))
    return object_list
class CourseResource(ModelResource):
    """
    Read-only tastypie resource for courses, with extra endpoints for
    downloading the course zip and the user's activity (tracker XML).
    Draft courses are visible only to staff and their owner.
    """
    class Meta:
        queryset = Course.objects.all()
        resource_name = 'course'
        allowed_methods = ['get']
        fields = ['id',
                  'title',
                  'version',
                  'shortname',
                  'priority',
                  'is_draft',
                  'description',
                  'author',
                  'username',
                  'organisation']
        authentication = ApiKeyAuthentication()
        authorization = ReadOnlyAuthorization()
        serializer = CourseJSONSerializer()
        always_return_data = True
        include_resource_uri = True
    def obj_get(self, bundle, **kwargs):
        """
        Overriden get method to perform a direct lookup if we are searching
        by shortname instead of pk
        """
        lookup = kwargs[self._meta.detail_uri_name]
        # Any alphabetic character means the identifier is a shortname.
        if re.search('[a-zA-Z]', lookup):
            object_list = get_course_from_shortname(self, bundle, lookup)
            bundle.obj = object_list[0]
            self.authorized_read_detail(object_list, bundle)
            return bundle.obj
        else:
            return super().obj_get(bundle, **kwargs)
    def get_object_list(self, request):
        # Staff see all non-archived courses; others also see their own drafts.
        if request.user.is_staff:
            return Course.objects.filter(is_archived=False) \
                .order_by('-priority', 'title')
        else:
            return Course.objects.filter(is_archived=False) \
                .filter(
                    Q(is_draft=False) |
                    (Q(is_draft=True) & Q(user=request.user))) \
                .order_by('-priority', 'title')
    def prepend_urls(self):
        # Extra detail endpoints: .../course/<pk>/download/ and .../activity/
        return [
            url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/download%s$"
                % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('download_course'), name="api_download_course"),
            url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/activity%s$"
                % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('download_activity'),
                name="api_download_activity"),
        ]
    def get_course(self, request, **kwargs):
        """
        Fetch a single visible course by pk, falling back to shortname when
        the identifier is not numeric. Raises Http404 if not found.
        """
        self.is_authenticated(request)
        self.throttle_check(request)
        pk = kwargs.pop('pk', None)
        try:
            if request.user.is_staff:
                course = self._meta.queryset.get(pk=pk, is_archived=False)
            else:
                # Non-staff may access published courses, their own drafts,
                # or drafts they have explicit permission for.
                course = self._meta.queryset \
                    .filter(
                        Q(is_draft=False) |
                        (Q(is_draft=True) & Q(user=request.user)) |
                        (Q(is_draft=True)
                         & Q(coursepermissions__user=request.user))) \
                    .distinct().get(pk=pk, is_archived=False)
        except Course.DoesNotExist:
            raise Http404(STR_COURSE_NOT_FOUND)
        except ValueError:
            # pk was not numeric - retry the same lookup by shortname.
            try:
                if request.user.is_staff:
                    course = self._meta.queryset.get(shortname=pk,
                                                     is_archived=False)
                else:
                    course = self._meta.queryset \
                        .filter(
                            Q(is_draft=False) |
                            (Q(is_draft=True) & Q(user=request.user)) |
                            (Q(is_draft=True)
                             & Q(coursepermissions__user=request.user))) \
                        .distinct().get(shortname=pk, is_archived=False)
            except Course.DoesNotExist:
                raise Http404(STR_COURSE_NOT_FOUND)
        return course
    def download_course(self, request, **kwargs):
        """
        Serve the course zip. If the user has completed trackers, a per-user
        copy of the zip is made with a tracker.xml appended.
        """
        course = self.get_course(request, **kwargs)
        file_to_download = course.getAbsPath()
        has_completed_trackers = Tracker.has_completed_trackers(course,
                                                                request.user)
        try:
            if has_completed_trackers:
                # Copy to a temp path so the shared zip is never modified.
                file_to_download = os.path.join(
                    settings.COURSE_UPLOAD_DIR,
                    "temp",
                    str(request.user.id) + "-" + course.filename)
                shutil.copy2(course.getAbsPath(), file_to_download)
            course_zip = zipfile.ZipFile(file_to_download, 'a')
            if has_completed_trackers:
                course_zip.writestr(course.shortname + "/tracker.xml",
                                    Tracker.to_xml_string(course,
                                                          request.user))
            course_zip.close()
            binary_file = open(file_to_download, 'rb')
            response = HttpResponse(binary_file.read(),
                                    content_type='application/zip')
            binary_file.close()
            response['Content-Length'] = os.path.getsize(file_to_download)
            response['Content-Disposition'] = \
                'attachment; filename="%s"' % (course.filename)
        except IOError:
            raise Http404(STR_COURSE_NOT_FOUND)
        course_downloaded.send(sender=self, course=course, request=request)
        return response
    def download_activity(self, request, **kwargs):
        """Serve the user's tracker activity for the course as XML."""
        course = self.get_course(request, **kwargs)
        return HttpResponse(Tracker.to_xml_string(course,
                                                  request.user),
                            content_type='text/xml')
    def dehydrate(self, bundle):
        """Add download url and author details; decode JSON-encoded fields."""
        bundle.data['url'] = bundle.request.build_absolute_uri(
            bundle.data['resource_uri'] + 'download/')
        # make sure title is shown as json object (not string representation \
        # of one)
        # NOTE(review): unlike 'description' below, a malformed 'title' is not
        # guarded against JSONDecodeError - confirm titles are always valid JSON.
        bundle.data['title'] = json.loads(bundle.data['title'])
        try:
            bundle.data['description'] = json.loads(bundle.data['description'])
        except json.JSONDecodeError:
            pass
        course = Course.objects.get(pk=bundle.obj.pk)
        if course and course.user:
            bundle.data['author'] = course.user.first_name \
                + " " \
                + course.user.last_name
            bundle.data['username'] = course.user.username
            bundle.data['organisation'] = course.user.userprofile.organisation
        return bundle
class CourseCategoryResource(ModelResource):
    """
    Read-only resource exposing course/category links, embedding the full
    course representation in each entry.
    """
    # NOTE(review): dotted path says 'api.resource.course' while this module
    # appears to live under 'api.resources' - confirm the ToOneField target
    # resolves at runtime.
    course = fields.ToOneField('api.resource.course.CourseResource',
                               'course',
                               full=True)
    class Meta:
        queryset = CourseCategory.objects.all()
        allowed_methods = ['get']
        resource_name = 'coursetag'
        fields = ['id', 'course', 'category']
        include_resource_uri = False
        authentication = ApiKeyAuthentication()
        authorization = ReadOnlyAuthorization()
        always_return_data = True
class CourseStructureResource(ModelResource):
    """
    Read-only resource exposing a course's module.xml structure as JSON.
    NOTE(review): Meta uses the permissive Authentication/Authorization
    classes, so this endpoint is effectively public (drafts and archived
    courses are already excluded by the queryset).
    """
    class Meta:
        queryset = Course.objects.filter(is_draft=False, is_archived=False)
        resource_name = 'coursestructure'
        allowed_methods = ['get']
        fields = ['shortname',
                  'id',
                  'structure']
        authentication = Authentication()
        authorization = Authorization()
        serializer = CourseJSONSerializer()
        always_return_data = True
        include_resource_uri = True
    def obj_get(self, bundle, **kwargs):
        """
        Overriden get method to perform a direct lookup if we are searching
        by shortname instead of pk
        """
        lookup = kwargs[self._meta.detail_uri_name]
        # Any alphabetic character means the identifier is a shortname.
        if re.search('[a-zA-Z]', lookup):
            object_list = get_course_from_shortname(self, bundle, lookup)
            return_obj = object_list[0]
        else:
            return_obj = super().obj_get(bundle, **kwargs)
        # check the module.xml is on disk
        path = os.path.join(settings.MEDIA_ROOT,
                            'courses',
                            return_obj.shortname,
                            'module.xml')
        if not os.path.isfile(path):
            # A course without module.xml is treated as missing entirely.
            raise self._meta.object_class.DoesNotExist()
        return return_obj
    def dehydrate(self, bundle):
        """Parse the course's module.xml and embed it, JSON-encoded, in the response."""
        path = os.path.join(settings.MEDIA_ROOT,
                            'courses',
                            bundle.obj.shortname,
                            'module.xml')
        with open(path) as fd:
            doc = xmltodict.parse(fd.read())
        bundle.data['structure'] = json.dumps(doc)
        return bundle
| gpl-3.0 |
resmo/ansible | test/units/modules/network/fortimanager/test_fmgr_secprof_voip.py | 38 | 2698 | # Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
import pytest
try:
from ansible.modules.network.fortimanager import fmgr_secprof_voip
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
def load_fixtures():
    """
    Load the JSON fixture file named after this test module from the
    'fixtures' directory next to it.

    Returns a single-element list containing the parsed fixture dict, or an
    empty list when the fixture file is missing or unreadable.
    """
    # Bug fix: the path was previously built with a literal filename and an
    # unused ``filename=`` format kwarg, so the per-module fixture
    # "<module>.json" was never looked up.
    module_name = os.path.splitext(os.path.basename(__file__))[0]
    fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures',
                                module_name + '.json')
    try:
        with open(fixture_path, "r") as fixture_file:
            fixture_data = json.load(fixture_file)
    except IOError:
        # Missing fixtures simply parameterize the tests with nothing.
        return []
    return [fixture_data]
@pytest.fixture(autouse=True)
def module_mock(mocker):
    """Auto-applied fixture replacing AnsibleModule with a MagicMock."""
    return mocker.patch('ansible.module_utils.basic.AnsibleModule')
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Auto-applied fixture replacing the module's Connection class with a MagicMock."""
    return mocker.patch('ansible.modules.network.fortimanager.fmgr_secprof_voip.Connection')
@pytest.fixture(scope="function", params=load_fixtures())
def fixture_data(request):
    """Return the fixture entry matching the calling test's name (minus 'test_')."""
    func_name = request.function.__name__.replace("test_", "")
    # None when the loaded fixture has no entry for this test.
    return request.param.get(func_name, None)
fmg_instance = FortiManagerHandler(connection_mock, module_mock)
def test_fmgr_voip_profile_modify(fixture_data, mocker):
    """Verify fmgr_voip_profile_modify reports status code 0 for both recorded fixtures."""
    # Replay the recorded FortiManager responses instead of hitting a device.
    mocker.patch("ansible.module_utils.network.fortimanager.fortimanager.FortiManagerHandler.process_request",
                 side_effect=fixture_data)
    # Test using fixture 1 #
    output = fmgr_secprof_voip.fmgr_voip_profile_modify(fmg_instance, fixture_data[0]['paramgram_used'])
    assert output['raw_response']['status']['code'] == 0
    # Test using fixture 2 #
    output = fmgr_secprof_voip.fmgr_voip_profile_modify(fmg_instance, fixture_data[1]['paramgram_used'])
    assert output['raw_response']['status']['code'] == 0
| gpl-3.0 |
jtux270/translate | ovirt/3.6_source/packaging/setup/plugins/ovirt-engine-common/base/network/process_firewalld_services.py | 8 | 3043 | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Process firewalld services
Parse the result
"""
import os
import libxml2
from otopi import constants as otopicons
from otopi import util
from ovirt_engine import util as outil
from ovirt_engine_setup import constants as osetupcons
@util.export
class Process(object):
    """
    Singleton that renders the declared firewalld service templates into the
    setup environment and extracts their port definitions.
    """
    # Shared singleton instance; see getInstance().
    _instance = None
    def __init__(self, environment):
        # Guard so the templates are rendered at most once per run.
        self._processed = False
        self._environment = environment
    @classmethod
    def getInstance(clz, environment):
        """Return the shared Process instance, creating it on first use."""
        if clz._instance is None:
            clz._instance = Process(environment=environment)
        return clz._instance
    @property
    def environment(self):
        # The setup environment dict this instance operates on.
        return self._environment
    def process_firewalld_services(self):
        """
        Render each declared firewalld service template and store the result
        in the environment under FIREWALLD_SERVICE_PREFIX + name. Idempotent.
        """
        if not self._processed:
            for service in self.environment[
                osetupcons.NetEnv.FIREWALLD_SERVICES
            ]:
                self.environment[
                    otopicons.NetEnv.FIREWALLD_SERVICE_PREFIX +
                    service['name']
                ] = outil.processTemplate(
                    template=os.path.join(
                        osetupcons.FileLocations.OVIRT_FIREWALLD_CONFIG,
                        service['directory'],
                        '%s.xml.in' % service['name'],
                    ),
                    subst=self.environment[osetupcons.NetEnv.FIREWALLD_SUBST],
                )
            self._processed = True
    def parseFirewalld(self, format, portSeparator='-'):
        """
        Collect all <port> entries from the rendered firewalld service XML
        documents and render each through ``format`` (a str.format template
        that may use {protocol} and {port}). Dashes in port ranges are
        replaced with portSeparator.
        NOTE: the 'format' parameter shadows the builtin of the same name.
        """
        self.process_firewalld_services()
        ret = ''
        for content in [
            content
            for key, content in self.environment.items()
            if key.startswith(
                otopicons.NetEnv.FIREWALLD_SERVICE_PREFIX
            )
        ]:
            doc = None
            ctx = None
            try:
                doc = libxml2.parseDoc(content)
                ctx = doc.xpathNewContext()
                nodes = ctx.xpathEval("/service/port")
                for node in nodes:
                    ret += format.format(
                        protocol=node.prop('protocol'),
                        port=node.prop('port').replace('-', portSeparator),
                    )
            finally:
                # libxml2 objects are not garbage collected; free explicitly.
                if doc is not None:
                    doc.freeDoc()
                if ctx is not None:
                    ctx.xpathFreeContext()
        return ret
# vim: expandtab tabstop=4 shiftwidth=4
| gpl-3.0 |
magicrub/mavlink | pymavlink/generator/lib/genxmlif/xmlif4Dom.py | 79 | 5669 | #
# genxmlif, Release 0.9.0
# file: xmlif4Dom.py
#
# XML interface class to the 4DOM library
#
# history:
# 2005-04-25 rl created
# 2008-07-01 rl Limited support of XInclude added
#
# Copyright (c) 2005-2008 by Roland Leuthe. All rights reserved.
#
# --------------------------------------------------------------------
# The generix XML interface is
#
# Copyright (c) 2005-2008 by Roland Leuthe
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
import urllib
from xml.dom.ext.reader.Sax2 import Reader, XmlDomGenerator
from xml.sax._exceptions import SAXParseException
from ..genxmlif import XMLIF_4DOM, GenXmlIfError
from xmlifUtils import convertToAbsUrl
from xmlifDom import XmlInterfaceDom, XmlIfBuilderExtensionDom, InternalDomTreeWrapper, InternalDomElementWrapper
class XmlInterface4Dom (XmlInterfaceDom):
    """XML interface implementation backed by the 4DOM library (Python 2)."""
    #####################################################
    # for description of the interface methods see xmlifbase.py
    #####################################################
    def __init__ (self, verbose, useCaching, processXInclude):
        XmlInterfaceDom.__init__ (self, verbose, useCaching, processXInclude)
        self.xmlIfType = XMLIF_4DOM
        if self.verbose:
            print "Using 4Dom interface module..."
    def parse (self, file, baseUrl="", internalOwnerDoc=None):
        """Parse the XML document at 'file' (resolved against baseUrl); return a tree wrapper."""
        absUrl = convertToAbsUrl (file, baseUrl)
        # urllib.urlopen handles local paths as well as remote URLs.
        fp = urllib.urlopen (absUrl)
        return self._parseStream (fp, file, absUrl, internalOwnerDoc)
    def parseString (self, text, baseUrl="", internalOwnerDoc=None):
        """Parse XML from the given string; return a tree wrapper."""
        import cStringIO
        fp = cStringIO.StringIO(text)
        absUrl = convertToAbsUrl ("", baseUrl)
        return self._parseStream (fp, "", absUrl, internalOwnerDoc)
    def _parseStream (self, fp, file, absUrl, internalOwnerDoc):
        """Common SAX-driven parse of an open stream; applies XInclude if enabled."""
        reader = Reader(validate=0, keepAllWs=0, catName=None,
                        saxHandlerClass=ExtXmlDomGenerator, parser=None)
        # Give the handler the context it needs to annotate created nodes.
        reader.handler.extinit(file, absUrl, reader.parser, self)
        if internalOwnerDoc != None:
            ownerDoc = internalOwnerDoc.document
        else:
            ownerDoc = None
        try:
            tree = reader.fromStream(fp, ownerDoc)
            fp.close()
        except SAXParseException, errInst:
            # Always close the stream, then surface a uniform error type.
            fp.close()
            raise GenXmlIfError, "%s: SAXParseException: %s" %(file, str(errInst))
        treeWrapper = reader.handler.treeWrapper
        # XInclude support
        if self.processXInclude:
            if internalOwnerDoc == None:
                internalOwnerDoc = treeWrapper.getTree()
            self.xInclude (treeWrapper.getRootNode(), absUrl, internalOwnerDoc)
        return treeWrapper
###################################################
# Extended DOM generator class derived from XmlDomGenerator
# extended to store related line numbers, file/URL names and
# defined namespaces in the node object
class ExtXmlDomGenerator(XmlDomGenerator, XmlIfBuilderExtensionDom):
    def __init__(self, keepAllWs=0):
        XmlDomGenerator.__init__(self, keepAllWs)
        # Created lazily on the first startElement call, once the DOM
        # root node exists.
        self.treeWrapper = None

    def extinit (self, filePath, absUrl, parser, xmlIf):
        # Second-stage init: called by the interface after construction,
        # since the SAX framework instantiates this class itself.
        self.filePath = filePath
        self.absUrl = absUrl
        self.parser = parser
        self.xmlIf = xmlIf

    def startElement(self, name, attribs):
        XmlDomGenerator.startElement(self, name, attribs)
        if not self.treeWrapper:
            # First element seen: wrap the freshly created DOM root and
            # initialise the builder-extension mixin exactly once.
            self.treeWrapper = self.xmlIf.treeWrapperClass(self, InternalDomTreeWrapper(self._rootNode), self.xmlIf.useCaching)
            XmlIfBuilderExtensionDom.__init__(self, self.filePath, self.absUrl, self.treeWrapper, self.xmlIf.elementWrapperClass)
        curNode = self._nodeStack[-1]
        internal4DomElementWrapper = InternalDomElementWrapper(curNode, self.treeWrapper.getTree())
        curNs = self._namespaces.items()
        try:
            # Drop the "no namespace" placeholder entry if present.
            curNs.remove( (None,None) )
        except:
            pass
        # Record line number and in-scope namespaces on the new element.
        XmlIfBuilderExtensionDom.startElementHandler (self, internal4DomElementWrapper, self.parser.getLineNumber(), curNs)

    def endElement(self, name):
        curNode = self._nodeStack[-1]
        # Record the element's end line before the node is popped.
        XmlIfBuilderExtensionDom.endElementHandler (self, curNode.xmlIfExtInternalWrapper, self.parser.getLineNumber())
        XmlDomGenerator.endElement(self, name)
| lgpl-3.0 |
cheungpat/sqlalchemy-utils | tests/types/test_tsvector.py | 4 | 2424 | import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import TSVECTOR
from sqlalchemy_utils import TSVectorType
from tests import TestCase
class TestTSVector(TestCase):
    """Tests for TSVectorType: table generation, reflection and the
    full-text-search operators it compiles to (requires PostgreSQL)."""
    # NOTE(review): "dns" looks like a typo for "dsn", but the base
    # TestCase in this suite reads the attribute under this name, so it
    # is left unchanged — confirm against the TestCase base class.
    dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'

    def create_models(self):
        # Model with a tsvector column indexing 'name' using the Finnish
        # text-search configuration.
        class User(self.Base):
            __tablename__ = 'user'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            search_index = sa.Column(
                TSVectorType(name, regconfig='pg_catalog.finnish')
            )

            def __repr__(self):
                return 'User(%r)' % self.id

        self.User = User

    def test_generates_table(self):
        assert 'search_index' in self.User.__table__.c

    def test_type_reflection(self):
        # Reflecting the table back should expose the column as the
        # native postgresql TSVECTOR type.
        reflected_metadata = sa.schema.MetaData()
        table = sa.schema.Table(
            'user',
            reflected_metadata,
            autoload=True,
            autoload_with=self.engine
        )
        assert isinstance(table.c['search_index'].type, TSVECTOR)

    def test_catalog_and_columns_as_args(self):
        type_ = TSVectorType('name', 'age', regconfig='pg_catalog.simple')
        assert type_.columns == ('name', 'age')
        assert type_.options['regconfig'] == 'pg_catalog.simple'

    def test_match(self):
        # match() should compile to @@ to_tsquery with the column's
        # configured regconfig.
        expr = self.User.search_index.match(u'something')
        assert str(expr.compile(self.connection)) == (
            '''"user".search_index @@ to_tsquery('pg_catalog.finnish', '''
            '''%(search_index_1)s)'''
        )

    def test_concat(self):
        assert str(self.User.search_index | self.User.search_index) == (
            '"user".search_index || "user".search_index'
        )

    def test_match_concatenation(self):
        # A concatenated tsvector expression is still matchable.
        concat = self.User.search_index | self.User.search_index
        bind = self.session.bind
        assert str(concat.match('something').compile(bind)) == (
            '("user".search_index || "user".search_index) @@ '
            "to_tsquery('pg_catalog.finnish', %(param_1)s)"
        )

    def test_match_with_catalog(self):
        # An explicit postgresql_regconfig overrides the column default.
        expr = self.User.search_index.match(
            u'something',
            postgresql_regconfig='pg_catalog.simple'
        )
        assert str(expr.compile(self.connection)) == (
            '''"user".search_index @@ to_tsquery('pg_catalog.simple', '''
            '''%(search_index_1)s)'''
        )
| bsd-3-clause |
Versent/ansible | lib/ansible/plugins/connections/jail.py | 131 | 7291 | # Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2013, Michael Scherer <misc@zarb.org>
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import traceback
import os
import shlex
import subprocess
from ansible import errors
from ansible.utils.unicode import to_bytes
from ansible.callbacks import vvv
import ansible.constants as C
BUFSIZE = 65536  # dd block size (bytes) used when streaming files in and out of the jail
class Connection(object):
    ''' Local BSD Jail based connections.

    Runs commands inside a named FreeBSD jail on the local machine by
    shelling out to jls/jexec.  Requires running as root, since jexec
    does.  File transfer is implemented by piping through dd inside the
    jail so large files are never read fully into memory.
    '''

    def _search_executable(self, executable):
        ''' Return the path of *executable*, or raise AnsibleError if it
        is not found on PATH. '''
        cmd = distutils.spawn.find_executable(executable)
        if not cmd:
            # BUGFIX: previously written as
            #   raise errors.AnsibleError("...") % executable
            # which applied % to the exception object and made this
            # error path raise TypeError instead of a useful message.
            raise errors.AnsibleError("%s command not found in PATH" % executable)
        return cmd

    def list_jails(self):
        ''' Return the names of all currently running jails (via jls). '''
        p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
                             cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        return stdout.split()

    def get_jail_path(self):
        ''' Return the root filesystem path of the configured jail. '''
        p = subprocess.Popen([self.jls_cmd, '-j', self.jail, '-q', 'path'],
                             cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        # Strip the trailing newline from the jls output.
        return stdout[:-1]

    def __init__(self, runner, host, port, *args, **kwargs):
        ''' host names the jail to operate on; port is accepted only for
        interface compatibility and ignored (the connection is local). '''
        self.jail = host
        self.runner = runner
        self.host = host
        self.has_pipelining = False
        self.become_methods_supported = C.BECOME_METHODS

        if os.geteuid() != 0:
            raise errors.AnsibleError("jail connection requires running as root")

        self.jls_cmd = self._search_executable('jls')
        self.jexec_cmd = self._search_executable('jexec')

        # Fail early if the requested jail is not actually running.
        if self.jail not in self.list_jails():
            raise errors.AnsibleError("incorrect jail name %s" % self.jail)

        self.host = host
        # port is unused, since this is local
        self.port = port

    def connect(self, port=None):
        ''' connect to the jail; nothing to do here '''
        vvv("THIS IS A LOCAL JAIL DIR", host=self.jail)
        return self

    def _generate_cmd(self, executable, cmd):
        ''' Build the jexec argv that runs *cmd* inside the jail: either
        via "<executable> -c <cmd>", or by word-splitting cmd when no
        shell executable is given. '''
        if executable:
            local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd]
        else:
            # Prior to python 2.7.3, shlex couldn't handle unicode type strings
            cmd = to_bytes(cmd)
            cmd = shlex.split(cmd)
            local_cmd = [self.jexec_cmd, self.jail]
            local_cmd += cmd
        return local_cmd

    def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None, stdin=subprocess.PIPE):
        ''' run a command on the jail.  This is only needed for implementing
        put_file() get_file() so that we don't have to read the whole file
        into memory.

        compared to exec_command() it loses some niceties like being able to
        return the process's exit code immediately.
        '''
        if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
            raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)

        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")

        # We enter the jail as root, so privilege escalation is ignored here
        # (may need revisiting to become a specific user, e.g. postgres admin).
        local_cmd = self._generate_cmd(executable, cmd)

        vvv("EXEC %s" % (local_cmd), host=self.jail)
        p = subprocess.Popen(local_cmd, shell=False,
                             cwd=self.runner.basedir,
                             stdin=stdin,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return p

    def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
        ''' run a command on the jail and return (rc, '', stdout, stderr) '''
        p = self._buffered_exec_command(cmd, tmp_path, become_user, sudoable, executable, in_data)

        stdout, stderr = p.communicate()
        return (p.returncode, '', stdout, stderr)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to jail, streaming through dd '''
        vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)

        try:
            with open(in_path, 'rb') as in_file:
                try:
                    p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), None, stdin=in_file)
                except OSError:
                    raise errors.AnsibleError("jail connection requires dd command in the jail")
                try:
                    stdout, stderr = p.communicate()
                except Exception:
                    # Was a bare except; narrowed so KeyboardInterrupt and
                    # SystemExit are not swallowed into AnsibleError.
                    traceback.print_exc()
                    raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
                if p.returncode != 0:
                    raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
        except IOError:
            raise errors.AnsibleError("file or module does not exist at: %s" % in_path)

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from jail to local, streaming through dd '''
        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)

        try:
            p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None)
        except OSError:
            raise errors.AnsibleError("jail connection requires dd command in the jail")

        with open(out_path, 'wb+') as out_file:
            try:
                chunk = p.stdout.read(BUFSIZE)
                while chunk:
                    out_file.write(chunk)
                    chunk = p.stdout.read(BUFSIZE)
            except Exception:
                # Was a bare except; narrowed (see put_file).
                traceback.print_exc()
                raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
            stdout, stderr = p.communicate()
            if p.returncode != 0:
                raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
| gpl-3.0 |
yilei0620/3D_Conditional_Gan | GenSample_obj.py | 1 | 4544 | import sys
sys.path.append('..')
import os
import json
from time import time
import numpy as np
from sklearn.externals import joblib
import scipy
from scipy import io
# from matplotlib import pyplot as plt
# from sklearn.externals import joblib
import theano
import theano.tensor as T
from lib import activations
from lib import updates
from lib import inits
from lib.rng import py_rng, np_rng
from lib.ops import batchnorm, conv_cond_concat, conv, dropout
from lib.theano_utils import floatX, sharedX
from lib.data_utils import OneHot, shuffle, iter_data
from lib.metrics import nnc_score, nnd_score
from load import load_shapenet_train, load_shapenet_test
# Activation / loss aliases used by the network definitions below.
relu = activations.Rectify()
sigmoid = activations.Sigmoid()
lrelu = activations.LeakyRectify()
bce = T.nnet.binary_crossentropy

# Hyper-parameters for the generator and (original) training setup.
parameters = {'objectNumber': 2, 'Nz' : 200, 'Channel' :(1,64,128,256,512), 'kernal':(4,4,4,4), 'batchsize': 50, 'Convlayersize':(64,32,16,8,4), 'Genlrt' : 0.001, 'Discrimlrt' : 0.00001 , 'beta' : 0.5, 'l2':2.5e-5, 'Genk' : 2 , 'niter':50, 'niter_decay' : 150}

# Promote every dict entry to a module-level global of the same name
# (Nz, Channel, kernal, ...); the rest of the script relies on these.
for p in parameters:
    tmp = p + " = parameters[p]"
    exec(tmp)
# print conditional,type(batchsize),Channel[-1],kernal

# Weight initialisers (kept from the training script; not used by the
# sampling code visible below).
gifn = inits.Normal(scale=0.02)
difn = inits.Normal(scale=0.02)
## filter_shape: (output channels, input channels, filter height, filter width, filter depth)
## load the parameters
# gen_params = [gw1, gw2, gw3, gw4, gw5, gwx]
# discrim_params = [dw1, dw2, dw3, dw4, dw5, dwy]

# Load the trained generator weights (epoch 50) and wrap each array in a
# theano shared variable. Naming: w = weights, g = batchnorm gain,
# b = batchnorm bias; wx = final output layer weights.
temp = joblib.load('models%d/50_gen_params.jl'%objectNumber)
gw1 = sharedX(temp[0])
gg1 = sharedX(temp[1])
gb1 = sharedX(temp[2])
gw2 = sharedX(temp[3])
gg2 = sharedX(temp[4])
gb2 = sharedX(temp[5])
gw3 = sharedX(temp[6])
gg3 = sharedX(temp[7])
gb3 = sharedX(temp[8])
gw4 = sharedX(temp[9])
gg4 = sharedX(temp[10])
gb4 = sharedX(temp[11])
gwx = sharedX(temp[12])
gen_params = [gw1, gg1, gb1, gw2, gg2, gb2, gw3, gg3, gb3, gw4, gg4, gb4, gwx]
def gen(Z, w1, g1, b1, w2, g2, b2, w3, g3, b3, w4, g4, b4, wx):
Gl1 = relu(batchnorm(T.dot(Z, w1), g=g1, b=b1))
Gl1 = Gl1.reshape((Gl1.shape[0],Channel[-1],Convlayersize[-1],Convlayersize[-1],Convlayersize[-1]))
input_shape = (None , None,Convlayersize[-1],Convlayersize[-1],Convlayersize[-1])
filter_shape = (Channel[-1] , Channel[-2], kernal[-1], kernal[-1], kernal[-1])
Gl2 = relu(batchnorm(conv(Gl1,w2,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g2, b = b2))
input_shape = (None , None,Convlayersize[-2],Convlayersize[-2],Convlayersize[-2])
filter_shape = (Channel[-2] , Channel[-3], kernal[-2], kernal[-2], kernal[-2])
Gl3 = relu(batchnorm(conv(Gl2,w3,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g3, b = b3))
input_shape = (None , None,Convlayersize[-3],Convlayersize[-3],Convlayersize[-3])
filter_shape = (Channel[-3] , Channel[-4], kernal[-3], kernal[-3], kernal[-3])
Gl4 = relu(batchnorm(conv(Gl3,w4,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g4, b= b4))
input_shape = (None, None, Convlayersize[-4],Convlayersize[-4],Convlayersize[-4])
filter_shape = (Channel[-4], Channel[-5], kernal[-4], kernal[-4], kernal[-4])
GlX = sigmoid(conv(Gl4,wx,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'))
return GlX
# Symbolic inputs: X (real voxels) is declared but unused here; Z is the
# latent batch fed to the generator.
X = T.tensor5()
Z = T.matrix()

gX = gen(Z, *gen_params)

print 'COMPILING'
t = time()
# _train_g = theano.function([X, Z, Y], cost, updates=g_updates)
# _train_d = theano.function([X, Z, Y], cost, updates=d_updates)
_gen = theano.function([Z], gX)
print '%.2f seconds to compile theano functions'%(time()-t)

# trX, trY, ntrain = load_shapenet_train()

n = 10       # total number of samples to generate
nbatch = 10  # samples per generator call
rng = np.random.RandomState(int(time()))
# sample_ymb = floatX(np.asarray(np.eye(3)))

# Per-dimension mean/std of the latent distribution, estimated offline
# and stored in a .mat file; reshaped to column vectors of length Nz.
z_dist = scipy.io.loadmat('Z_dist_class2.mat')
z_mean = z_dist['mean']
z_mean = np.reshape(z_mean, (Nz, 1))
z_std = z_dist['std']
z_std = np.reshape(z_std, (Nz, 1))
def gen_z(z_dist, nbatch):
    """Draw a (nbatch, Nz) latent batch, sampling each dimension j from
    a normal with the empirical mean/std of that dimension.

    NOTE(review): the z_dist parameter is unused — the function reads
    the module-level z_mean/z_std arrays instead; confirm intent.
    """
    ret = np.zeros((nbatch, Nz))
    for j in xrange(Nz):
        z_tmp = np_rng.normal(z_mean[j], z_std[j], nbatch)
        ret[:, j] = z_tmp
    # print ret
    return ret
# Ensure the output directory exists (ignore "already exists" errors).
try:
    os.mkdir('Gen_models%d'%objectNumber)
except:
    pass

# Generate n samples in batches of nbatch; save each generated voxel
# grid together with the latent vector that produced it.
for j in xrange(n/nbatch):
    sample_zmb = floatX(gen_z(z_dist, nbatch))
    samples = np.asarray(_gen(sample_zmb))
    for i in xrange(nbatch):
        io.savemat('Gen_models%d/Gen_example_%d.mat'%(objectNumber, nbatch*j+i), {'instance':samples[i,:,:,:], 'Z':sample_zmb[i,:]})

# niter = 1
# niter_decay = 1
yewang15215/django | tests/gis_tests/tests.py | 12 | 4177 | import sys
import unittest
from django.core.exceptions import ImproperlyConfigured
from django.db import ProgrammingError
from django.utils import six
try:
from django.contrib.gis.db.backends.postgis.operations import PostGISOperations
HAS_POSTGRES = True
except ImportError:
HAS_POSTGRES = False
except ImproperlyConfigured as e:
# If psycopg is installed but not geos, the import path hits
# django.contrib.gis.geometry.backend which will "helpfully" convert
# an ImportError into an ImproperlyConfigured.
# Here, we make sure we're only catching this specific case and not another
# ImproperlyConfigured one.
if e.args and e.args[0].startswith('Could not import user-defined GEOMETRY_BACKEND'):
HAS_POSTGRES = False
else:
six.reraise(*sys.exc_info())
if HAS_POSTGRES:
    class FakeConnection(object):
        # Minimal stand-in for a Django DB connection: PostGISOperations
        # only reads settings_dict['NAME'] from it in these tests.
        def __init__(self):
            self.settings_dict = {
                'NAME': 'test',
            }

    class FakePostGISOperations(PostGISOperations):
        # Fakes the PostGIS version probe so the version-parsing logic
        # can be exercised without a real database.
        def __init__(self, version=None):
            self.version = version
            self.connection = FakeConnection()

        def _get_postgis_func(self, func):
            # Emulate only the probes used by version detection; any
            # other call indicates a test bug.
            if func == 'postgis_lib_version':
                if self.version is None:
                    # No version configured: behave like a server where
                    # the PostGIS extension is missing.
                    raise ProgrammingError
                else:
                    return self.version
            elif func == 'version':
                pass
            else:
                raise NotImplementedError('This function was not expected to be called')
@unittest.skipUnless(HAS_POSTGRES, "The psycopg2 driver is needed for these tests")
class TestPostGISVersionCheck(unittest.TestCase):
    """
    The PostGIS version check parses version numbers correctly.
    """

    def test_get_version(self):
        # postgis_lib_version() returns the raw version string.
        expect = '1.0.0'
        ops = FakePostGISOperations(expect)
        actual = ops.postgis_lib_version()
        self.assertEqual(expect, actual)

    def test_version_classic_tuple(self):
        expect = ('1.2.3', 1, 2, 3)
        ops = FakePostGISOperations(expect[0])
        actual = ops.postgis_version_tuple()
        self.assertEqual(expect, actual)

    def test_version_dev_tuple(self):
        # A "dev" suffix must not break the numeric parsing.
        expect = ('1.2.3dev', 1, 2, 3)
        ops = FakePostGISOperations(expect[0])
        actual = ops.postgis_version_tuple()
        self.assertEqual(expect, actual)

    def test_valid_version_numbers(self):
        versions = [
            ('1.3.0', 1, 3, 0),
            ('2.1.1', 2, 1, 1),
            ('2.2.0dev', 2, 2, 0),
        ]
        for version in versions:
            ops = FakePostGISOperations(version[0])
            actual = ops.spatial_version
            self.assertEqual(version[1:], actual)

    def test_invalid_version_numbers(self):
        versions = ['nope', '123']
        for version in versions:
            ops = FakePostGISOperations(version)
            with self.assertRaises(Exception):
                ops.spatial_version

    def test_no_version_number(self):
        # Missing PostGIS should surface as ImproperlyConfigured.
        ops = FakePostGISOperations()
        with self.assertRaises(ImproperlyConfigured):
            ops.spatial_version

    def test_version_dependent_funcs(self):
        """
        Resolve names of functions renamed and deprecated in PostGIS 2.2.0
        depending on PostGIS version.
        Remove when dropping support for PostGIS 2.1.
        """
        ops = FakePostGISOperations('2.2.0')
        self.assertEqual(ops.spatial_function_name('DistanceSphere'), 'ST_DistanceSphere')
        self.assertEqual(ops.spatial_function_name('DistanceSpheroid'), 'ST_DistanceSpheroid')
        self.assertEqual(ops.spatial_function_name('LengthSpheroid'), 'ST_LengthSpheroid')
        self.assertEqual(ops.spatial_function_name('MemSize'), 'ST_MemSize')

        ops = FakePostGISOperations('2.1.0')
        self.assertEqual(ops.spatial_function_name('DistanceSphere'), 'ST_distance_sphere')
        self.assertEqual(ops.spatial_function_name('DistanceSpheroid'), 'ST_distance_spheroid')
        self.assertEqual(ops.spatial_function_name('LengthSpheroid'), 'ST_length_spheroid')
        self.assertEqual(ops.spatial_function_name('MemSize'), 'ST_mem_size')
| bsd-3-clause |
GeoscienceAustralia/agdc | src/dbutil.py | 4 | 25902 | #!/usr/bin/env python
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
"""dbutil.py - PostgreSQL database utilities for testing.
This module provides easy access to the test database server, and
provides a way to create, load, save and drop databases from this server.
It also provides wrapper classes for psycopg2 database connections that
implement utility queries as methods.
"""
import os
import sys
import logging
import random
import subprocess
import re
import psycopg2
#
# Root directory for test resources.
#
TEST_RESOURCES_ROOT = '/g/data1/v10/test_resources'
#
# Setup information for the test server. This might be better off loaded
# from a config file, but this will do for now. The password is kept
# in a .pgpass file to avoid saving it in versioned files. This is likely
# a better solution than recording the password either here or in a config
# file.
#
TESTSERVER_PARAMS = {
'name': 'test_server',
'host': '130.56.244.226',
'port': '6432',
'user': 'cube_tester',
'superuser': 'cube_admin'
}
#
# Database connection constants. These would be better off being defaults
# for items that can be overridden by a configuration file.
#
CONNECT_TIMEOUT = 60
MAINTENANCE_DB = 'postgres'
TEMPLATE_DB = 'template0'
USE_PGBOUNCER = True
PGBOUNCER_DB = 'pgbouncer'
#
# Random string constants. These set the parameters for random strings
# appended to database names by the random_name utility function. The intent
# is to make temporary database names (most likely) unique to avoid clashes.
# The current format is 9 decimal digits.
#
RANDOM_STR_MIN = 1
RANDOM_STR_MAX = 999999999
RANDOM_STR_FORMAT = "%09d"
#
# Server class
#
class Server(object):
    """Abstraction of a database server.

    Gathers all the parameters that describe a server or how to work
    with it, and provides services that use this information."""

    def __init__(self, params):
        self.name = params['name']
        self.host = params['host']
        self.port = params['port']
        self.user = params['user']
        self.superuser = params['superuser']

    def connect(self, dbname, superuser=False, autocommit=True):
        """Create a psycopg2 connection to a database and return it.

        dbname: The database to connect to.
        superuser: Set to True to connect as the superuser, otherwise
            connect as the user.
        autocommit: Set to False to turn off autocommit, otherwise
            autocommit will be turned on."""
        user = (self.superuser if superuser else self.user)
        dsn = ("dbname=%s host=%s port=%s user=%s connect_timeout=%s" %
               (dbname, self.host, self.port, user, CONNECT_TIMEOUT))
        conn = psycopg2.connect(dsn)
        conn.autocommit = autocommit
        return conn

    def exists(self, dbname):
        """Returns True if the named database exists on the server."""
        maint_conn = MaintenanceWrapper(
            self.connect(MAINTENANCE_DB, superuser=True))
        try:
            result = maint_conn.exists(dbname)
        finally:
            maint_conn.close()
        return result

    def dblist(self):
        """Returns a list of the databases on the server."""
        maint_conn = MaintenanceWrapper(
            self.connect(MAINTENANCE_DB, superuser=True))
        try:
            result = maint_conn.dblist()
        finally:
            maint_conn.close()
        return result

    def load(self, dbname, save_dir, save_file):
        """Load the contents of a database from a file.

        The database should be empty, and based off template0 or
        equivalent. This method calls the psql command to do the load."""
        save_path = os.path.join(save_dir, save_file)
        load_cmd = ["psql",
                    "--dbname=%s" % dbname,
                    "--username=%s" % self.superuser,
                    "--host=%s" % self.host,
                    "--port=%s" % self.port,
                    "--file=%s" % save_path]

        try:
            subprocess.check_output(load_cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # Make sure error output is in the error message.
            message = ("%s: problem calling %s:\n%s" %
                       (__name__, err.cmd[0], err.output))
            # Append the full command line for debugging (note: parts are
            # concatenated without separators).
            for k in range(len(load_cmd)):
                message = message + load_cmd[k]
            raise AssertionError(message)

    def save(self, dbname, save_dir, save_file, table=None):
        """Save the contents of a database to a file.

        This method calls the pg_dump command to do the save. This
        dump is in sql script format so use psql to reload."""
        save_path = os.path.join(save_dir, save_file)
        save_cmd = ["pg_dump",
                    "--dbname=%s" % dbname,
                    "--username=%s" % self.superuser,
                    "--host=%s" % self.host,
                    "--port=%s" % self.port,
                    "--file=%s" % save_path]
        if table:
            # Restrict the dump to a single table when requested.
            save_cmd.append("--table=%s" % table)

        try:
            subprocess.check_output(save_cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            #Make sure error output is in the error message.
            message = ("%s: problem calling %s:\n%s" %
                       (__name__, err.cmd[0], err.output))
            raise AssertionError(message)

    def copy_table_between_databases(self, dbname1, dbname2, table_name):
        """Copy a table from one database to another on the same server.

        This method pipes the output of pg_dump to psql."""
        dump_cmd = ["pg_dump",
                    "--dbname=%s" % dbname1,
                    "--username=%s" % self.superuser,
                    "--host=%s" % self.host,
                    "--port=%s" % self.port,
                    "--table=%s" % table_name]
        load_cmd = ["psql",
                    "--dbname=%s" % dbname2,
                    "--username=%s" % self.superuser,
                    "--host=%s" % self.host,
                    "--port=%s" % self.port
                    ]

        try:
            # pg_dump's stdout is fed directly into psql's stdin.
            ps_dump = subprocess.Popen(dump_cmd, stdout=subprocess.PIPE,
                                       stderr=subprocess.STDOUT)
            dummy_output = subprocess.check_output(load_cmd,
                                                   stdin=ps_dump.stdout,
                                                   stderr=subprocess.STDOUT)
            ps_dump.wait()
        except subprocess.CalledProcessError as err:
            #Make sure error output is in the error message.
            message = ("%s: problem calling %s:\n%s" %
                       (__name__, err.cmd[0], err.output))
            raise AssertionError(message)

    def drop(self, dbname):
        """Drop the named database.

        Connections are closed explicitly with try/finally blocks,
        since they do not seem to be closed automatically in the
        case of exceptions and this causes problems.

        If pgbouncer is in use a pgbouncer pause command needs to
        be issued before dropping the database. This will wait
        until active transactions are complete."""
        maint_conn = MaintenanceWrapper(
            self.connect(MAINTENANCE_DB, superuser=True))
        try:
            if maint_conn.exists(dbname):
                if USE_PGBOUNCER:
                    bouncer_conn = BouncerWrapper(
                        self.connect(PGBOUNCER_DB, superuser=True))
                    try:
                        bouncer_conn.pause(dbname)
                        maint_conn.drop(dbname)
                    finally:
                        bouncer_conn.close()
                else:
                    maint_conn.drop(dbname)
        finally:
            maint_conn.close()

    def create(self, dbname, save_dir=None, save_file=None,
               template_db=TEMPLATE_DB):
        """Creates and loads a database from a file.

        This method does a clean create and load of the named database
        from the file 'savefile'. It drops an old database of the same
        name if neccessary.

        It uses template_db as the template database, which is copied
        to create the new database.

        If save_dir or save_file are None (or not specified), no
        save file is loaded.

        Connections are closed explicitly with try/finally blocks,
        since they do not seem to be closed automatically in the
        case of exceptions and this causes problems.

        If pgbouncer is in use a pgbouncer pause command needs to
        be issued before dropping the database. This will wait
        until active transactions are complete. The pgbouncer
        resume command is issued once the database is (re)created.
        This is needed to prevent connection attempts to the new database
        from hanging or returning errors if pgbouncer had pools set
        up on the old database."""
        maint_conn = MaintenanceWrapper(
            self.connect(MAINTENANCE_DB, superuser=True))
        try:
            # Create the database, dropping it first if needed.
            if USE_PGBOUNCER:
                bouncer_conn = BouncerWrapper(
                    self.connect(PGBOUNCER_DB, superuser=True))
                try:
                    if maint_conn.exists(dbname):
                        bouncer_conn.pause(dbname)
                        maint_conn.drop(dbname)
                    # To be used as a template, template_db must have
                    # no current connections.
                    bouncer_conn.kill(template_db)
                    maint_conn.create(dbname, template_db)
                    bouncer_conn.resume(dbname)
                finally:
                    bouncer_conn.close()
            else:
                if maint_conn.exists(dbname):
                    maint_conn.drop(dbname)
                maint_conn.create(dbname, template_db)

            # Load the new database from the save file if necessary
            if save_file is not None or save_dir is not None:
                self.load(dbname, save_dir, save_file)

            # Run ANALYSE on the newly loaded database
            db_conn = ConnectionWrapper(self.connect(dbname, superuser=True))
            try:
                db_conn.analyse()
            finally:
                db_conn.close()

            # All done
        finally:
            maint_conn.close()
#
# Connection wrappers.
#
class ConnectionWrapper(object):
    """Base wrapper around a psycopg2 database connection.

    Attribute lookups that the wrapper does not define fall through to
    the wrapped connection (see __getattr__), so subclasses can layer
    utility queries on top of the ordinary psycopg2 API.

    database_name is handy for testing and error messages; analyse is
    run after a database has been created/loaded.
    """

    def __init__(self, conn):
        self.conn = conn

    def database_name(self):
        """Return the name of the database this connection points at."""
        query = ("SELECT catalog_name\n"
                 "FROM information_schema.information_schema_catalog_name;")
        with self.conn.cursor() as cursor:
            cursor.execute(query)
            row = cursor.fetchone()
        return row[0]

    def analyse(self):
        """Run the ANALYSE command on the connected database."""
        with self.conn.cursor() as cursor:
            cursor.execute("ANALYSE;")

    def __getattr__(self, attrname):
        # Anything we don't implement is delegated to psycopg2.
        return getattr(self.conn, attrname)
class MaintenanceWrapper(ConnectionWrapper):
    """Wrapper for a connection intended for maintenance commands:
    queries against pg_database plus CREATE/DROP DATABASE."""

    def exists(self, dbname):
        """Return True if a database called *dbname* exists."""
        query = ("SELECT datname FROM pg_database\n"
                 "WHERE datname = %(dbname)s;")
        with self.conn.cursor() as cursor:
            cursor.execute(query, {'dbname': dbname})
            return bool(cursor.fetchone())

    def dblist(self):
        """Return the names of all databases on the server."""
        with self.conn.cursor() as cursor:
            cursor.execute("SELECT datname FROM pg_database;")
            return [row[0] for row in cursor.fetchall()]

    def drop(self, dbname):
        """Drop the named database."""
        with self.conn.cursor() as cursor:
            cursor.execute("DROP DATABASE %s;" % safe_name(dbname))

    def create(self, dbname, template_db=TEMPLATE_DB):
        """Create the named database as a copy of *template_db*."""
        statement = ("CREATE DATABASE %s\n" % safe_name(dbname) +
                     "TEMPLATE %s;" % template_db)
        with self.conn.cursor() as cursor:
            cursor.execute(statement)
class BouncerWrapper(ConnectionWrapper):
    """Wrapper for a connection to the pgbouncer console pseudo-database.

    The console commands only make sense on that pseudo-database.
    pgbouncer may not know about the database being operated on, but
    the commands must be issued anyway in case it does, so every
    command is best-effort: database errors are swallowed.
    """

    def _run_command(self, sql):
        # Issue a single console command, ignoring database errors.
        with self.conn.cursor() as cursor:
            try:
                cursor.execute(sql)
            except psycopg2.DatabaseError:
                pass

    def pause(self, dbname):
        """Ask pgbouncer to pause the named database.

        pgbouncer disconnects from dbname after waiting for any queries
        to complete, which allows the database to be dropped."""
        self._run_command("PAUSE %s;" % safe_name(dbname))

    def kill(self, dbname):
        """Ask pgbouncer to kill its connections to the named database,
        without waiting for queries to complete."""
        self._run_command("KILL %s;" % safe_name(dbname))

    def resume(self, dbname):
        """Ask pgbouncer to resume work on the named database.

        Required after a pause; otherwise later connection attempts
        hang or return errors."""
        self._run_command("RESUME %s;" % safe_name(dbname))
#
# Utility functions
#
def random_name(basename=""):
    """Return basename followed by "_" and a 9-digit random number."""
    suffix = (RANDOM_STR_FORMAT %
              random.randint(RANDOM_STR_MIN, RANDOM_STR_MAX))
    return "%s_%s" % (basename, suffix)
def safe_name(dbname):
    """Return dbname with everything but letters, digits and '_' removed."""
    return "".join(c for c in dbname if c.isalnum() or c == '_')
def resources_directory(*names):
    """Returns the path to a test resources directory, creating it if needed.

    The path of the directory is TEST_RESOURCES_ROOT/name1/name2/...
    where name1, name2, ... are the names passed in as parameters.
    """
    test_dir = os.path.join(TEST_RESOURCES_ROOT, *names)
    if not os.path.isdir(test_dir):
        # Allow group permissions on the directory we are about to create.
        old_umask = os.umask(0o007)
        try:
            os.makedirs(test_dir)
        finally:
            # Bug fix: restore the old umask even if makedirs raises
            # (e.g. permission error, or the path racing into existence),
            # so the modified umask is never leaked to the caller.
            os.umask(old_umask)
    return test_dir
def version_or_user(version=None, user=None):
    """Return the version string, or the user string when version is 'user'.

    version: The version of the datacube code, expected to be 'develop',
        'user', or a version number.  When falsy it is read from the
        DATACUBE_VERSION environment variable, defaulting to 'user'.
    user: Used in place of version when version resolves to 'user'.
        When falsy it is read from the USER environment variable.
    """
    # Falsy checks (rather than 'is None') because "" is not a valid
    # version or user.
    resolved = version or os.environ.get('DATACUBE_VERSION', 'user')
    if resolved != 'user':
        return resolved
    return user or os.environ['USER']
def input_directory(module, suite, version=None, user=None):
    """Return the path to a test input directory, creating it if needed.

    The directory is TEST_RESOURCES_ROOT/version/input/module/suite/,
    with the user string substituted for version when the version
    resolves to 'user' (see version_or_user for the resolution rules).

    module: The name of the module being tested, eg 'dbcompare'.
    suite: The name of the test suite or test class containing the test,
        eg 'TestReporter'.
    version: Code version ('develop', 'user', or a version number);
        defaults come from the DATACUBE_VERSION environment variable.
    user: User name; defaults come from the USER environment variable.

    The 'input' directory holds input or setup files for tests, named
    after the tests that use them.
    """
    subdir = version_or_user(version, user)
    return resources_directory(subdir, 'input', module, suite)
def output_directory(module, suite, user=None):
    """Return the path to a test output directory, creating it if needed.

    The directory is TEST_RESOURCES_ROOT/user/output/module/suite/,
    where user defaults to the USER environment variable.

    module: the name of the module being tested, eg 'dbcompare'.
    suite: the name of the test suite or test class containing the test,
        eg 'TestReporter'.

    The 'output' directory holds test output, named after the tests
    that produce it.
    """
    subdir = version_or_user(version='user', user=user)
    return resources_directory(subdir, 'output', module, suite)
def expected_directory(module, suite, version=None, user=None):
    """Return the path to a test expected directory, creating it if needed.

    The directory is TEST_RESOURCES_ROOT/version/expected/module/suite/,
    with the user string substituted for version when the version
    resolves to 'user' (see version_or_user for the resolution rules).

    module: The name of the module being tested, eg 'dbcompare'.
    suite: The name of the test suite or test class containing the test,
        eg 'TestReporter'.
    version: Code version ('develop', 'user', or a version number);
        defaults come from the DATACUBE_VERSION environment variable.
    user: User name; defaults come from the USER environment variable.

    The 'expected' directory holds expected test output, named after the
    tests that produce it; these files automate the tests by comparison
    against the actual output.
    """
    subdir = version_or_user(version, user)
    return resources_directory(subdir, 'expected', module, suite)
def temp_directory(module, suite, test_dir, version=None, user=None):
    """Return the path to a 'temp' subdirectory, creating it if needed."""
    subdir = version_or_user(version, user)
    return resources_directory(subdir, test_dir, module, suite, 'temp')
def tile_root_directory(module, suite, test_dir, version=None, user=None):
    """Return the path to a 'tile_root' subdirectory, creating it if needed."""
    subdir = version_or_user(version, user)
    return resources_directory(subdir, test_dir, module, suite, 'tile_root')
def update_config_file(dbname, input_dir, output_dir, config_file_name,
                       output_file_name=None):
    """Create an updated agdc_default.config with its database name replaced.

    Convenience wrapper around update_config_file2 that rewrites only the
    'dbname' parameter.  Returns the path to the updated config file.

    dbname: the name of the database to connect to.
    input_dir: the directory containing the config file template.
    output_dir: the directory the updated config file is written to.
    config_file_name: the name of the config file (template and updated).
    output_file_name: the name of the updated config file; defaults to
        config_file_name.
    """
    return update_config_file2({'dbname': dbname}, input_dir, output_dir,
                               config_file_name, output_file_name)
def update_config_file2(parameter_values_dict, input_dir, output_dir,
                        config_file_name, output_file_name=None):
    """Create an updated config file with the given parameters replaced.

    Each key in parameter_values_dict names a parameter that appears in
    the template as a line of the form 'key = ...'; the whole line is
    rewritten as 'key = value', preserving any trailing carriage return.
    Returns the path to the updated config file.

    parameter_values_dict: mapping of parameter name to new value.
    input_dir: directory containing the config file template.
    output_dir: directory the updated config file is written to.
    config_file_name: name of the config template file.
    output_file_name: name of the updated config file; defaults to
        config_file_name.
    """
    source_path = os.path.join(input_dir, config_file_name)
    target_path = os.path.join(output_dir,
                               output_file_name or config_file_name)
    with open(source_path) as source:
        content = source.read()
    for param, value in parameter_values_dict.items():
        content = re.sub(r'^\s*%s\s*=[^\n\r]*(\r?)$' % param,
                        r'%s = %s\1' % (param, value),
                        content, flags=re.MULTILINE)
    with open(target_path, 'w') as target:
        target.write(content)
    return target_path
def create_logger(name, logfile_path=None):
    """Create a logger object in the datacube style.

    Sets up a logger with the handler, formatter, and level usual for
    the datacube scripts.  'name' is the logger name; __name__ (the
    current module) is a typical value.  When 'logfile_path' is given it
    names a log file which is opened in write mode and attached to the
    logger; otherwise output goes to sys.stdout.
    """
    if logfile_path:
        handler = logging.FileHandler(logfile_path, mode='w')
    else:
        handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter('%(message)s'))
    logger = logging.getLogger(name)
    if logger.level == logging.NOTSET:
        # Default logging level for all modules.
        logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    return logger
#
# Test server instance:
#
# Module-level Server instance shared by the test suite; constructed at
# import time from TESTSERVER_PARAMS (defined earlier in this module),
# so importing this module opens the test database connection.
TESTSERVER = Server(TESTSERVER_PARAMS)
| bsd-3-clause |
735tesla/SneakPeep | unidecode/x0b9.py | 253 | 4704 | data = (
'ruk', # 0x00
'rut', # 0x01
'rup', # 0x02
'ruh', # 0x03
'rweo', # 0x04
'rweog', # 0x05
'rweogg', # 0x06
'rweogs', # 0x07
'rweon', # 0x08
'rweonj', # 0x09
'rweonh', # 0x0a
'rweod', # 0x0b
'rweol', # 0x0c
'rweolg', # 0x0d
'rweolm', # 0x0e
'rweolb', # 0x0f
'rweols', # 0x10
'rweolt', # 0x11
'rweolp', # 0x12
'rweolh', # 0x13
'rweom', # 0x14
'rweob', # 0x15
'rweobs', # 0x16
'rweos', # 0x17
'rweoss', # 0x18
'rweong', # 0x19
'rweoj', # 0x1a
'rweoc', # 0x1b
'rweok', # 0x1c
'rweot', # 0x1d
'rweop', # 0x1e
'rweoh', # 0x1f
'rwe', # 0x20
'rweg', # 0x21
'rwegg', # 0x22
'rwegs', # 0x23
'rwen', # 0x24
'rwenj', # 0x25
'rwenh', # 0x26
'rwed', # 0x27
'rwel', # 0x28
'rwelg', # 0x29
'rwelm', # 0x2a
'rwelb', # 0x2b
'rwels', # 0x2c
'rwelt', # 0x2d
'rwelp', # 0x2e
'rwelh', # 0x2f
'rwem', # 0x30
'rweb', # 0x31
'rwebs', # 0x32
'rwes', # 0x33
'rwess', # 0x34
'rweng', # 0x35
'rwej', # 0x36
'rwec', # 0x37
'rwek', # 0x38
'rwet', # 0x39
'rwep', # 0x3a
'rweh', # 0x3b
'rwi', # 0x3c
'rwig', # 0x3d
'rwigg', # 0x3e
'rwigs', # 0x3f
'rwin', # 0x40
'rwinj', # 0x41
'rwinh', # 0x42
'rwid', # 0x43
'rwil', # 0x44
'rwilg', # 0x45
'rwilm', # 0x46
'rwilb', # 0x47
'rwils', # 0x48
'rwilt', # 0x49
'rwilp', # 0x4a
'rwilh', # 0x4b
'rwim', # 0x4c
'rwib', # 0x4d
'rwibs', # 0x4e
'rwis', # 0x4f
'rwiss', # 0x50
'rwing', # 0x51
'rwij', # 0x52
'rwic', # 0x53
'rwik', # 0x54
'rwit', # 0x55
'rwip', # 0x56
'rwih', # 0x57
'ryu', # 0x58
'ryug', # 0x59
'ryugg', # 0x5a
'ryugs', # 0x5b
'ryun', # 0x5c
'ryunj', # 0x5d
'ryunh', # 0x5e
'ryud', # 0x5f
'ryul', # 0x60
'ryulg', # 0x61
'ryulm', # 0x62
'ryulb', # 0x63
'ryuls', # 0x64
'ryult', # 0x65
'ryulp', # 0x66
'ryulh', # 0x67
'ryum', # 0x68
'ryub', # 0x69
'ryubs', # 0x6a
'ryus', # 0x6b
'ryuss', # 0x6c
'ryung', # 0x6d
'ryuj', # 0x6e
'ryuc', # 0x6f
'ryuk', # 0x70
'ryut', # 0x71
'ryup', # 0x72
'ryuh', # 0x73
'reu', # 0x74
'reug', # 0x75
'reugg', # 0x76
'reugs', # 0x77
'reun', # 0x78
'reunj', # 0x79
'reunh', # 0x7a
'reud', # 0x7b
'reul', # 0x7c
'reulg', # 0x7d
'reulm', # 0x7e
'reulb', # 0x7f
'reuls', # 0x80
'reult', # 0x81
'reulp', # 0x82
'reulh', # 0x83
'reum', # 0x84
'reub', # 0x85
'reubs', # 0x86
'reus', # 0x87
'reuss', # 0x88
'reung', # 0x89
'reuj', # 0x8a
'reuc', # 0x8b
'reuk', # 0x8c
'reut', # 0x8d
'reup', # 0x8e
'reuh', # 0x8f
'ryi', # 0x90
'ryig', # 0x91
'ryigg', # 0x92
'ryigs', # 0x93
'ryin', # 0x94
'ryinj', # 0x95
'ryinh', # 0x96
'ryid', # 0x97
'ryil', # 0x98
'ryilg', # 0x99
'ryilm', # 0x9a
'ryilb', # 0x9b
'ryils', # 0x9c
'ryilt', # 0x9d
'ryilp', # 0x9e
'ryilh', # 0x9f
'ryim', # 0xa0
'ryib', # 0xa1
'ryibs', # 0xa2
'ryis', # 0xa3
'ryiss', # 0xa4
'rying', # 0xa5
'ryij', # 0xa6
'ryic', # 0xa7
'ryik', # 0xa8
'ryit', # 0xa9
'ryip', # 0xaa
'ryih', # 0xab
'ri', # 0xac
'rig', # 0xad
'rigg', # 0xae
'rigs', # 0xaf
'rin', # 0xb0
'rinj', # 0xb1
'rinh', # 0xb2
'rid', # 0xb3
'ril', # 0xb4
'rilg', # 0xb5
'rilm', # 0xb6
'rilb', # 0xb7
'rils', # 0xb8
'rilt', # 0xb9
'rilp', # 0xba
'rilh', # 0xbb
'rim', # 0xbc
'rib', # 0xbd
'ribs', # 0xbe
'ris', # 0xbf
'riss', # 0xc0
'ring', # 0xc1
'rij', # 0xc2
'ric', # 0xc3
'rik', # 0xc4
'rit', # 0xc5
'rip', # 0xc6
'rih', # 0xc7
'ma', # 0xc8
'mag', # 0xc9
'magg', # 0xca
'mags', # 0xcb
'man', # 0xcc
'manj', # 0xcd
'manh', # 0xce
'mad', # 0xcf
'mal', # 0xd0
'malg', # 0xd1
'malm', # 0xd2
'malb', # 0xd3
'mals', # 0xd4
'malt', # 0xd5
'malp', # 0xd6
'malh', # 0xd7
'mam', # 0xd8
'mab', # 0xd9
'mabs', # 0xda
'mas', # 0xdb
'mass', # 0xdc
'mang', # 0xdd
'maj', # 0xde
'mac', # 0xdf
'mak', # 0xe0
'mat', # 0xe1
'map', # 0xe2
'mah', # 0xe3
'mae', # 0xe4
'maeg', # 0xe5
'maegg', # 0xe6
'maegs', # 0xe7
'maen', # 0xe8
'maenj', # 0xe9
'maenh', # 0xea
'maed', # 0xeb
'mael', # 0xec
'maelg', # 0xed
'maelm', # 0xee
'maelb', # 0xef
'maels', # 0xf0
'maelt', # 0xf1
'maelp', # 0xf2
'maelh', # 0xf3
'maem', # 0xf4
'maeb', # 0xf5
'maebs', # 0xf6
'maes', # 0xf7
'maess', # 0xf8
'maeng', # 0xf9
'maej', # 0xfa
'maec', # 0xfb
'maek', # 0xfc
'maet', # 0xfd
'maep', # 0xfe
'maeh', # 0xff
)
| apache-2.0 |
marinho/geraldo | site/newsite/django_1_0/django/db/backends/mysql/creation.py | 9 | 1281 | # This dictionary maps Field objects to their associated MySQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
# Maps Django model Field class names to MySQL column-type strings.
DATA_TYPES = {
    'AutoField': 'integer AUTO_INCREMENT',
    'BooleanField': 'bool',
    # '%(max_length)s'-style placeholders are interpolated against
    # Field.__dict__ before the column definition is emitted (see the
    # module comment above).
    'CharField': 'varchar(%(max_length)s)',
    'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
    'DateField': 'date',
    'DateTimeField': 'datetime',
    'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
    'FileField': 'varchar(%(max_length)s)',
    'FilePathField': 'varchar(%(max_length)s)',
    'FloatField': 'double precision',
    'IntegerField': 'integer',
    'IPAddressField': 'char(15)',
    'NullBooleanField': 'bool',
    'OneToOneField': 'integer',
    'PhoneNumberField': 'varchar(20)',
    'PositiveIntegerField': 'integer UNSIGNED',
    'PositiveSmallIntegerField': 'smallint UNSIGNED',
    'SlugField': 'varchar(%(max_length)s)',
    'SmallIntegerField': 'smallint',
    'TextField': 'longtext',
    'TimeField': 'time',
    'USStateField': 'varchar(2)',
}
| lgpl-3.0 |
lochiiconnectivity/libcloud | libcloud/dns/drivers/pointdns.py | 22 | 28897 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Point DNS Driver
"""
__all__ = [
'PointDNSException',
'Redirect',
'MailRedirect',
'PointDNSDriver'
]
import sys
try:
import simplejson as json
except ImportError:
import json
from libcloud.utils.py3 import httplib
from libcloud.common.types import ProviderError
from libcloud.common.types import MalformedResponseError
from libcloud.common.pointdns import PointDNSConnection
from libcloud.common.exceptions import BaseHTTPError
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
class PointDNSException(ProviderError):
    """ProviderError subclass raised for Point DNS API failures.

    Carries the error message and HTTP status code, and mirrors them in
    ``self.args`` so the exception unpacks as ``(http_code, value)``.
    """

    def __init__(self, value, http_code, driver=None):
        super(PointDNSException, self).__init__(value=value,
                                                http_code=http_code,
                                                driver=driver)
        self.args = (http_code, value)
class Redirect(object):
    """
    Point DNS redirect.
    """

    def __init__(self, id, name, data, type, driver, zone, iframe=None,
                 query=False):
        """
        :param id: Redirect id.
        :type id: ``str``

        :param name: The FQDN for the record.
        :type name: ``str``

        :param data: The data field. (redirect_to)
        :type data: ``str``

        :param type: The type of redirects 301, 302 or 0 for iframes.
        :type type: ``str``

        :param driver: DNSDriver instance.
        :type driver: :class:`DNSDriver`

        :param zone: Zone where redirect belongs.
        :type zone: :class:`Zone`

        :param iframe: Title of iframe (optional).
        :type iframe: ``str``

        :param query: boolean Information about including query string when
                      redirecting. (optional).
        :type query: ``bool``
        """
        self.id = str(id) if id else None
        self.name = name
        self.data = data
        self.type = str(type) if type else None
        self.driver = driver
        self.zone = zone
        self.iframe = iframe
        self.query = query

    def update(self, data, name=None, type=None, iframe=None, query=None):
        """Update this redirect via the driver and return the new Redirect.

        Bug fix: ex_update_redirect's parameter for the redirect target
        is named ``redirect_to``, not ``data``; the previous ``data=``
        keyword raised a TypeError on every call.
        """
        return self.driver.ex_update_redirect(redirect=self, name=name,
                                              redirect_to=data, type=type,
                                              iframe=iframe, query=query)

    def delete(self):
        """Delete this redirect via the driver."""
        return self.driver.ex_delete_redirect(redirect=self)

    def __repr__(self):
        return ('<PointDNSRedirect: name=%s, data=%s, type=%s ...>' %
                (self.name, self.data, self.type))
class MailRedirect(object):
    """
    Point DNS mail redirect.
    """

    def __init__(self, id, source, destination, zone, driver):
        """
        :param id: MailRedirect id.
        :type id: ``str``

        :param source: The source address of mail redirect.
        :type source: ``str``

        :param destination: The destination address of mail redirect.
        :type destination: ``str``

        :param zone: Zone where mail redirect belongs.
        :type zone: :class:`Zone`

        :param driver: DNSDriver instance.
        :type driver: :class:`DNSDriver`
        """
        self.id = str(id) if id else None
        self.source = source
        self.destination = destination
        self.zone = zone
        self.driver = driver

    def update(self, destination, source=None):
        """Update this mail redirect via the driver.

        Bug fix: the original passed ``source=None`` unconditionally,
        silently discarding the caller's ``source`` argument.
        """
        return self.driver.ex_update_mail_redirect(mail_r=self,
                                                   destination=destination,
                                                   source=source)

    def delete(self):
        """Delete this mail redirect via the driver."""
        return self.driver.ex_delete_mail_redirect(mail_r=self)

    def __repr__(self):
        return ('<PointDNSMailRedirect: source=%s, destination=%s,zone=%s ...>'
                % (self.source, self.destination, self.zone.id))
class PointDNSDriver(DNSDriver):
type = Provider.POINTDNS
name = 'Point DNS'
website = 'https://pointhq.com/'
connectionCls = PointDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.ALIAS: 'ALIAS',
RecordType.CNAME: 'CNAME',
RecordType.MX: 'MX',
RecordType.NS: 'NS',
RecordType.PTR: 'PTR',
RecordType.SRV: 'SRV',
RecordType.SSHFP: 'SSHFP',
RecordType.TXT: 'TXT'
}
def list_zones(self):
"""
Return a list of zones.
:return: ``list`` of :class:`Zone`
"""
response = self.connection.request('/zones')
zones = self._to_zones(response.object)
return zones
def list_records(self, zone):
"""
Return a list of records for the provided zone.
:param zone: Zone to list records for.
:type zone: :class:`Zone`
:return: ``list`` of :class:`Record`
"""
response = self.connection.request('/zones/%s/records' % zone.id)
records = self._to_records(response.object, zone)
return records
def get_zone(self, zone_id):
"""
Return a Zone instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:rtype: :class:`Zone`
"""
try:
response = self.connection.request('/zones/%s' % zone_id)
except MalformedResponseError:
e = sys.exc_info()[1]
if e.body == 'Not found':
raise ZoneDoesNotExistError(driver=self,
value="The zone doesn't exists",
zone_id=zone_id)
raise e
zone = self._to_zone(response.object)
return zone
def get_record(self, zone_id, record_id):
"""
Return a Record instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:param record_id: ID of the required record
:type record_id: ``str``
:rtype: :class:`Record`
"""
try:
response = self.connection.request('/zones/%s/records/%s' %
(zone_id, record_id))
except MalformedResponseError:
e = sys.exc_info()[1]
if e.body == 'Not found':
raise RecordDoesNotExistError(value="Record doesn't exists",
driver=self,
record_id=record_id)
raise e
record = self._to_record(response.object, zone_id=zone_id)
return record
def create_zone(self, domain, type='master', ttl=None, extra=None):
"""
Create a new zone.
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param type: Zone type (All zones are master by design).
:type type: ``str``
:param ttl: TTL for new records. (optional)
:type ttl: ``int``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:rtype: :class:`Zone`
"""
r_json = {'name': domain}
if ttl is not None:
r_json['ttl'] = ttl
if extra is not None:
r_json.update(extra)
r_data = json.dumps({'zone': r_json})
try:
response = self.connection.request('/zones', method='POST',
data=r_data)
except BaseHTTPError:
e = sys.exc_info()[1]
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
zone = self._to_zone(response.object)
return zone
def create_record(self, name, zone, type, data, extra=None):
"""
Create a new record.
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param zone: Zone where the requested record is created.
:type zone: :class:`Zone`
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:rtype: :class:`Record`
"""
r_json = {'name': name, 'data': data, 'record_type': type}
if extra is not None:
r_json.update(extra)
r_data = json.dumps({'zone_record': r_json})
try:
response = self.connection.request('/zones/%s/records' % zone.id,
method='POST', data=r_data)
except BaseHTTPError:
e = sys.exc_info()[1]
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
record = self._to_record(response.object, zone=zone)
return record
def update_zone(self, zone, domain, type='master', ttl=None, extra=None):
"""
Update an existing zone.
:param zone: Zone to update.
:type zone: :class:`Zone`
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param type: Zone type (All zones are master by design).
:type type: ``str``
:param ttl: TTL for new records. (optional)
:type ttl: ``int``
:param extra: Extra attributes (group, user-id). (optional)
:type extra: ``dict``
:rtype: :class:`Zone`
"""
r_json = {'name': domain}
if extra is not None:
r_json.update(extra)
r_data = json.dumps({'zone': r_json})
try:
response = self.connection.request('/zones/%s' % zone.id,
method='PUT', data=r_data)
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
if isinstance(e, MalformedResponseError) and e.body == 'Not found':
raise ZoneDoesNotExistError(value="Zone doesn't exists",
driver=self,
zone_id=zone.id)
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
zone = self._to_zone(response.object)
return zone
def update_record(self, record, name, type, data, extra=None):
"""
Update an existing record.
:param record: Record to update.
:type record: :class:`Record`
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: :class:`Record`
"""
zone = record.zone
r_json = {'name': name, 'data': data, 'record_type': type}
if extra is not None:
r_json.update(extra)
r_data = json.dumps({'zone_record': r_json})
try:
response = self.connection.request('/zones/%s/records/%s' %
(zone.id, record.id),
method='PUT', data=r_data)
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
if isinstance(e, MalformedResponseError) and e.body == 'Not found':
raise RecordDoesNotExistError(value="Record doesn't exists",
driver=self,
record_id=record.id)
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
record = self._to_record(response.object, zone=zone)
return record
def delete_zone(self, zone):
"""
Delete a zone.
Note: This will delete all the records belonging to this zone.
:param zone: Zone to delete.
:type zone: :class:`Zone`
:rtype: ``bool``
"""
try:
self.connection.request('/zones/%s' % zone.id, method='DELETE')
except MalformedResponseError:
e = sys.exc_info()[1]
if e.body == 'Not found':
raise ZoneDoesNotExistError(driver=self,
value="The zone doesn't exists",
zone_id=zone.id)
raise e
return True
def delete_record(self, record):
"""
Delete a record.
:param record: Record to delete.
:type record: :class:`Record`
:rtype: ``bool``
"""
zone_id = record.zone.id
record_id = record.id
try:
self.connection.request('/zones/%s/records/%s' % (zone_id,
record_id),
method='DELETE')
except MalformedResponseError:
e = sys.exc_info()[1]
if e.body == 'Not found':
raise RecordDoesNotExistError(value="Record doesn't exists",
driver=self,
record_id=record_id)
raise e
return True
def ex_list_redirects(self, zone):
"""
:param zone: Zone to list redirects for.
:type zone: :class:`Zone`
:rtype: ``list`` of :class:`Record`
"""
response = self.connection.request('/zones/%s/redirects' % zone.id)
redirects = self._to_redirects(response.object, zone)
return redirects
def ex_list_mail_redirects(self, zone):
"""
:param zone: Zone to list redirects for.
:type zone: :class:`Zone`
:rtype: ``list`` of :class:`MailRedirect`
"""
response = self.connection.request('/zones/%s/mail_redirects' %
zone.id)
mail_redirects = self._to_mail_redirects(response.object, zone)
return mail_redirects
def ex_create_redirect(self, redirect_to, name, type, zone, iframe=None,
query=None):
"""
:param redirect_to: The data field. (redirect_to)
:type redirect_to: ``str``
:param name: The FQDN for the record.
:type name: ``str``
:param type: The type of redirects 301, 302 or 0 for iframes.
:type type: ``str``
:param zone: Zone to list redirects for.
:type zone: :class:`Zone`
:param iframe: Title of iframe (optional).
:type iframe: ``str``
:param query: boolean Information about including query string when
redirecting. (optional).
:type query: ``bool``
:rtype: :class:`Record`
"""
r_json = {'name': name, 'redirect_to': redirect_to}
if type is not None:
r_json['redirect_type'] = type
if iframe is not None:
r_json['iframe_title'] = iframe
if query is not None:
r_json['redirect_query_string'] = query
r_data = json.dumps({'zone_redirect': r_json})
try:
response = self.connection.request('/zones/%s/redirects' % zone.id,
method='POST', data=r_data)
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
redirect = self._to_redirect(response.object, zone=zone)
return redirect
def ex_create_mail_redirect(self, destination, source, zone):
"""
:param destination: The destination address of mail redirect.
:type destination: ``str``
:param source: The source address of mail redirect.
:type source: ``str``
:param zone: Zone to list redirects for.
:type zone: :class:`Zone`
:rtype: ``list`` of :class:`MailRedirect`
"""
r_json = {'destination_address': destination, 'source_address': source}
r_data = json.dumps({'zone_mail_redirect': r_json})
try:
response = self.connection.request('/zones/%s/mail_redirects' %
zone.id, method='POST',
data=r_data)
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
mail_redirect = self._to_mail_redirect(response.object, zone=zone)
return mail_redirect
def ex_get_redirect(self, zone_id, redirect_id):
"""
:param zone: Zone to list redirects for.
:type zone: :class:`Zone`
:param redirect_id: Redirect id.
:type redirect_id: ``str``
:rtype: ``list`` of :class:`Redirect`
"""
try:
response = self.connection.request('/zones/%s/redirects/%s' %
(zone_id, redirect_id))
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
if isinstance(e, MalformedResponseError) and e.body == 'Not found':
raise PointDNSException(value='Couldn\'t found redirect',
http_code=httplib.NOT_FOUND,
driver=self)
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
redirect = self._to_redirect(response.object, zone_id=zone_id)
return redirect
def ex_get_mail_redirects(self, zone_id, mail_r_id):
"""
:param zone: Zone to list redirects for.
:type zone: :class:`Zone`
:param mail_r_id: Mail redirect id.
:type mail_r_id: ``str``
:rtype: ``list`` of :class:`MailRedirect`
"""
try:
response = self.connection.request('/zones/%s/mail_redirects/%s' %
(zone_id, mail_r_id))
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
if isinstance(e, MalformedResponseError) and e.body == 'Not found':
raise PointDNSException(value='Couldn\'t found mail redirect',
http_code=httplib.NOT_FOUND,
driver=self)
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
mail_redirect = self._to_mail_redirect(response.object,
zone_id=zone_id)
return mail_redirect
def ex_update_redirect(self, redirect, redirect_to=None, name=None,
                       type=None, iframe=None, query=None):
    """
    :param redirect: Record to update
    :type id: :class:`Redirect`

    :param redirect_to: The data field. (optional).
    :type redirect_to: ``str``

    :param name: The FQDN for the record.
    :type name: ``str``

    :param type: The type of redirects 301, 302 or 0 for iframes.
                 (optional).
    :type type: ``str``

    :param iframe: Title of iframe (optional).
    :type iframe: ``str``

    :param query: boolean Information about including query string when
                  redirecting. (optional).
    :type query: ``bool``

    :rtype: ``list`` of :class:`Redirect`
    """
    zone_id = redirect.zone.id
    r_json = {}
    if redirect_to is not None:
        r_json['redirect_to'] = redirect_to
    if name is not None:
        r_json['name'] = name
    if type is not None:
        # Bug fix: ex_create_redirect sends this value as
        # 'redirect_type'; the previous 'record_type' key belongs to
        # zone records, not redirects. TODO confirm against the Point
        # API documentation.
        r_json['redirect_type'] = type
    if iframe is not None:
        r_json['iframe_title'] = iframe
    if query is not None:
        r_json['redirect_query_string'] = query
    r_data = json.dumps({'zone_redirect': r_json})
    try:
        response = self.connection.request('/zones/%s/redirects/%s' %
                                           (zone_id, redirect.id),
                                           method='PUT', data=r_data)
    except (BaseHTTPError, MalformedResponseError):
        e = sys.exc_info()[1]
        if isinstance(e, MalformedResponseError) and e.body == 'Not found':
            raise PointDNSException(value='Couldn\'t found redirect',
                                    http_code=httplib.NOT_FOUND,
                                    driver=self)
        raise PointDNSException(value=e.message, http_code=e.code,
                                driver=self)
    redirect = self._to_redirect(response.object, zone=redirect.zone)
    return redirect
def ex_update_mail_redirect(self, mail_r, destination, source=None):
    """
    :param mail_r: Mail redirect to update
    :type mail_r: :class:`MailRedirect`

    :param destination: The destination address of mail redirect.
    :type destination: ``str``

    :param source: The source address of mail redirect. (optional)
    :type source: ``str``

    :rtype: ``list`` of :class:`MailRedirect`
    """
    zone_id = mail_r.zone.id
    r_json = {'destination_address': destination}
    if source is not None:
        r_json['source_address'] = source
    # Bug fix: ex_create_mail_redirect wraps the payload in
    # 'zone_mail_redirect'; the previous 'zone_redirect' wrapper did not
    # match the resource being updated. TODO confirm against the Point
    # API documentation.
    r_data = json.dumps({'zone_mail_redirect': r_json})
    try:
        response = self.connection.request('/zones/%s/mail_redirects/%s' %
                                           (zone_id, mail_r.id),
                                           method='PUT', data=r_data)
    except (BaseHTTPError, MalformedResponseError):
        e = sys.exc_info()[1]
        if isinstance(e, MalformedResponseError) and e.body == 'Not found':
            raise PointDNSException(value='Couldn\'t found mail redirect',
                                    http_code=httplib.NOT_FOUND,
                                    driver=self)
        raise PointDNSException(value=e.message, http_code=e.code,
                                driver=self)
    mail_redirect = self._to_mail_redirect(response.object,
                                           zone=mail_r.zone)
    return mail_redirect
def ex_delete_redirect(self, redirect):
"""
:param mail_r: Redirect to delete
:type mail_r: :class:`Redirect`
:rtype: ``bool``
"""
zone_id = redirect.zone.id
redirect_id = redirect.id
try:
self.connection.request('/zones/%s/redirects/%s' % (zone_id,
redirect_id), method='DELETE')
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
if isinstance(e, MalformedResponseError) and e.body == 'Not found':
raise PointDNSException(value='Couldn\'t found redirect',
http_code=httplib.NOT_FOUND,
driver=self)
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
return True
def ex_delete_mail_redirect(self, mail_r):
"""
:param mail_r: Mail redirect to update
:type mail_r: :class:`MailRedirect`
:rtype: ``bool``
"""
zone_id = mail_r.zone.id
mail_r_id = mail_r.id
try:
self.connection.request('/zones/%s/mail_redirects/%s' % (zone_id,
mail_r_id), method='DELETE')
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
if isinstance(e, MalformedResponseError) and e.body == 'Not found':
raise PointDNSException(value='Couldn\'t found mail redirect',
http_code=httplib.NOT_FOUND,
driver=self)
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
return True
def _to_zones(self, data):
zones = []
for zone in data:
_zone = self._to_zone(zone)
zones.append(_zone)
return zones
def _to_zone(self, data):
zone = data.get('zone')
id = zone.get('id')
name = zone.get('name')
ttl = zone.get('ttl')
extra = {'group': zone.get('group'),
'user-id': zone.get('user-id')}
# All zones are a primary ones by design, so they
# assume that are the master source of info about the
# zone, which is the case when domain DNS records
# points to PointDNS nameservers.
type = 'master'
return Zone(id=id, domain=name, type=type, ttl=ttl, driver=self,
extra=extra)
def _to_records(self, data, zone):
records = []
for item in data:
record = self._to_record(item, zone=zone)
records.append(record)
return records
def _to_record(self, data, zone_id=None, zone=None):
if not zone: # We need zone_id or zone
zone = self.get_zone(zone_id)
record = data.get('zone_record')
id = record.get('id')
name = record.get('name')
type = record.get('record_type')
data = record.get('data')
extra = {'ttl': record.get('ttl'),
'zone_id': record.get('zone_id'),
'aux': record.get('aux')}
return Record(id=id, name=name, type=type, data=data, zone=zone,
driver=self, ttl=record.get('ttl', None), extra=extra)
def _to_redirects(self, data, zone):
redirects = []
for item in data:
redirect = self._to_redirect(item, zone=zone)
redirects.append(redirect)
return redirects
def _to_redirect(self, data, zone_id=None, zone=None):
if not zone: # We need zone_id or zone
zone = self.get_zone(zone_id)
record = data.get('zone_redirect')
id = record.get('id')
name = record.get('name')
redirect_to = record.get('redirect_to')
type = record.get('redirect_type')
iframe = record.get('iframe_title')
query = record.get('redirect_query_string')
return Redirect(id, name, redirect_to, type, self, zone,
iframe=iframe, query=query)
def _to_mail_redirects(self, data, zone):
mail_redirects = []
for item in data:
mail_redirect = self._to_mail_redirect(item, zone=zone)
mail_redirects.append(mail_redirect)
return mail_redirects
def _to_mail_redirect(self, data, zone_id=None, zone=None):
if not zone: # We need zone_id or zone
zone = self.get_zone(zone_id)
record = data.get('zone_mail_redirect')
id = record.get('id')
destination = record.get('destination_address')
source = record.get('source_address')
return MailRedirect(id, source, destination, zone, self)
| apache-2.0 |
neilpelow/wmap-django | venv/lib/python3.5/site-packages/django/contrib/gis/geos/coordseq.py | 374 | 5482 | """
This module houses the GEOSCoordSeq object, which is used internally
by GEOSGeometry to house the actual coordinates of the Point,
LineString, and LinearRing geometries.
"""
from ctypes import byref, c_double, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.libgeos import CS_PTR
from django.contrib.gis.shortcuts import numpy
from django.utils.six.moves import range
class GEOSCoordSeq(GEOSBase):
    "The internal representation of a list of coordinates inside a Geometry."
    # ctypes pointer type expected by GEOSBase's ptr-validation machinery.
    ptr_type = CS_PTR
    def __init__(self, ptr, z=False):
        "Initializes from a GEOS pointer."
        if not isinstance(ptr, CS_PTR):
            raise TypeError('Coordinate sequence should initialize with a CS_PTR.')
        # NOTE(review): the pointer appears to be owned by the parent
        # geometry (no __del__ here frees it) -- confirm against GEOSGeometry.
        self._ptr = ptr
        # Whether the parent geometry is 3D; exposed through `hasz`.
        self._z = z
    def __iter__(self):
        "Iterates over each point in the coordinate sequence."
        for i in range(self.size):
            yield self[i]
    def __len__(self):
        "Returns the number of points in the coordinate sequence."
        return int(self.size)
    def __str__(self):
        "Returns the string representation of the coordinate sequence."
        return str(self.tuple)
    def __getitem__(self, index):
        "Returns the coordinate sequence value at the given index."
        # Z is only included when the sequence is 3D AND the parent
        # geometry was flagged 3D at construction time.
        coords = [self.getX(index), self.getY(index)]
        if self.dims == 3 and self._z:
            coords.append(self.getZ(index))
        return tuple(coords)
    def __setitem__(self, index, value):
        "Sets the coordinate sequence value at the given index."
        # Checking the input value: only list/tuple (or numpy array when
        # numpy is available) are accepted.
        if isinstance(value, (list, tuple)):
            pass
        elif numpy and isinstance(value, numpy.ndarray):
            pass
        else:
            raise TypeError('Must set coordinate with a sequence (list, tuple, or numpy array).')
        # Checking the dims of the input: the value must supply exactly as
        # many ordinates as this sequence stores (2, or 3 for 3D).
        if self.dims == 3 and self._z:
            n_args = 3
            set_3d = True
        else:
            n_args = 2
            set_3d = False
        if len(value) != n_args:
            raise TypeError('Dimension of value does not match.')
        # Setting the X, Y, Z
        self.setX(index, value[0])
        self.setY(index, value[1])
        if set_3d:
            self.setZ(index, value[2])
    # #### Internal Routines ####
    def _checkindex(self, index):
        "Checks the given index."
        # Negative (Python-style) indexing is deliberately not supported.
        sz = self.size
        if (sz < 1) or (index < 0) or (index >= sz):
            raise IndexError('invalid GEOS Geometry index: %s' % str(index))
    def _checkdim(self, dim):
        "Checks the given dimension."
        # Valid ordinate dimensions are 0 (X), 1 (Y) and 2 (Z).
        if dim < 0 or dim > 2:
            raise GEOSException('invalid ordinate dimension "%d"' % dim)
    # #### Ordinate getting and setting routines ####
    def getOrdinate(self, dimension, index):
        "Returns the value for the given dimension and index."
        self._checkindex(index)
        self._checkdim(dimension)
        # NOTE(review): GEOS writes the result through the by-ref double;
        # the capi prototypes layer appears to convert that out-param into
        # the Python return value -- confirm in geos.prototypes.
        return capi.cs_getordinate(self.ptr, index, dimension, byref(c_double()))
    def setOrdinate(self, dimension, index, value):
        "Sets the value for the given dimension and index."
        self._checkindex(index)
        self._checkdim(dimension)
        capi.cs_setordinate(self.ptr, index, dimension, value)
    def getX(self, index):
        "Get the X value at the index."
        return self.getOrdinate(0, index)
    def setX(self, index, value):
        "Set X with the value at the given index."
        self.setOrdinate(0, index, value)
    def getY(self, index):
        "Get the Y value at the given index."
        return self.getOrdinate(1, index)
    def setY(self, index, value):
        "Set Y with the value at the given index."
        self.setOrdinate(1, index, value)
    def getZ(self, index):
        "Get Z with the value at the given index."
        return self.getOrdinate(2, index)
    def setZ(self, index, value):
        "Set Z with the value at the given index."
        self.setOrdinate(2, index, value)
    # ### Dimensions ###
    @property
    def size(self):
        "Returns the size of this coordinate sequence."
        return capi.cs_getsize(self.ptr, byref(c_uint()))
    @property
    def dims(self):
        "Returns the dimensions of this coordinate sequence."
        return capi.cs_getdims(self.ptr, byref(c_uint()))
    @property
    def hasz(self):
        """
        Returns whether this coordinate sequence is 3D. This property value is
        inherited from the parent Geometry.
        """
        return self._z
    # ### Other Methods ###
    def clone(self):
        "Clones this coordinate sequence."
        return GEOSCoordSeq(capi.cs_clone(self.ptr), self.hasz)
    @property
    def kml(self):
        "Returns the KML representation for the coordinates."
        # Getting the substitution string depending on whether the coordinates have
        # a Z dimension. 2D points are emitted with a literal 0 Z ordinate.
        if self.hasz:
            substr = '%s,%s,%s '
        else:
            substr = '%s,%s,0 '
        # The trailing space appended per point is stripped at the end.
        return '<coordinates>%s</coordinates>' % \
            ''.join(substr % self[i] for i in range(len(self))).strip()
    @property
    def tuple(self):
        "Returns a tuple version of this coordinate sequence."
        # A single point is returned as a bare coordinate tuple rather than
        # a 1-element tuple of tuples.
        n = self.size
        if n == 1:
            return self[0]
        else:
            return tuple(self[i] for i in range(n))
| gpl-3.0 |
goose3/goose3 | tests/test_reportagenewsarticle.py | 2 | 1221 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" originally licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from .test_base import TestExtractionBase
class TestReportageNewsArticle(TestExtractionBase):
    """Extraction checks for the ReportageNewsArticle schema fixture."""

    def test_reportagenewsarticle(self):
        checked_fields = ["schema", "publish_date", "domain", "final_url",
                          "title"]
        self.runArticleAssertions(article=self.getArticle(),
                                  fields=checked_fields)
| apache-2.0 |
trezorg/django | django/templatetags/future.py | 226 | 3486 | from django.conf import settings
from django.template import Library, Node, Template, TemplateSyntaxError
from django.template.defaulttags import kwarg_re, include_is_allowed, SsiNode, URLNode
from django.utils.encoding import smart_str
register = Library()
@register.tag
def ssi(parser, token):
    """
    Outputs the contents of a given file into the page.

    Like a simple "include" tag, the ``ssi`` tag includes the contents
    of another file -- which must be specified using an absolute path --
    in the current page::

        {% ssi "/home/html/ljworld.com/includes/right_generic.html" %}

    If the optional "parsed" parameter is given, the contents of the included
    file are evaluated as template code, with the current context::

        {% ssi "/home/html/ljworld.com/includes/right_generic.html" parsed %}
    """
    bits = token.contents.split()
    # Exactly one path argument, optionally followed by 'parsed'.
    if len(bits) not in (2, 3):
        raise TemplateSyntaxError("'ssi' tag takes one argument: the path to"
                                  " the file to be included")
    parsed = len(bits) == 3
    if parsed and bits[2] != 'parsed':
        raise TemplateSyntaxError("Second (optional) argument to %s tag"
                                  " must be 'parsed'" % bits[0])
    filepath = parser.compile_filter(bits[1])
    return SsiNode(filepath, parsed, legacy_filepath=False)
@register.tag
def url(parser, token):
    """
    Returns an absolute URL matching given view with its parameters.

    This is a way to define links that aren't tied to a particular URL
    configuration::

        {% url "path.to.some_view" arg1 arg2 %}
    or
        {% url "path.to.some_view" name1=value1 name2=value2 %}

    The first argument is a path to a view. It can be an absolute python path
    or just ``app_name.view_name`` without the project name if the view is
    located inside the project. Other arguments are comma-separated values
    that will be filled in place of positional and keyword arguments in the
    URL. All arguments for the URL should be present.

    For example if you have a view ``app_name.client`` taking client's id and
    the corresponding line in a URLconf looks like this::

        ('^client/(\d+)/$', 'app_name.client')

    and this app's URLconf is included into the project's URLconf under some
    path::

        ('^clients/', include('project_name.app_name.urls'))

    then in a template you can create a link for a certain client like this::

        {% url "app_name.client" client.id %}

    The URL will look like ``/clients/client/123/``.
    """
    bits = token.split_contents()
    if len(bits) < 2:
        raise TemplateSyntaxError("'%s' takes at least one argument"
                                  " (path to a view)" % bits[0])
    viewname = parser.compile_filter(bits[1])
    args = []
    kwargs = {}
    asvar = None
    remainder = bits[2:]
    # A trailing "as varname" stores the URL instead of outputting it.
    if len(remainder) >= 2 and remainder[-2] == 'as':
        asvar = remainder[-1]
        remainder = remainder[:-2]
    for bit in remainder:
        match = kwarg_re.match(bit)
        if not match:
            raise TemplateSyntaxError("Malformed arguments to url tag")
        name, value = match.groups()
        if name:
            kwargs[name] = parser.compile_filter(value)
        else:
            args.append(parser.compile_filter(value))
    return URLNode(viewname, args, kwargs, asvar, legacy_view_name=False)
| bsd-3-clause |
simbha/mAngE-Gin | lib/Django 1.7/django/contrib/messages/api.py | 512 | 3202 | from django.contrib.messages import constants
from django.contrib.messages.storage import default_storage
from django.http import HttpRequest
__all__ = (
'add_message', 'get_messages',
'get_level', 'set_level',
'debug', 'info', 'success', 'warning', 'error',
'MessageFailure',
)
class MessageFailure(Exception):
    # Raised by add_message() when message storage is unavailable and
    # fail_silently is False.
    pass
def add_message(request, level, message, extra_tags='', fail_silently=False):
    """
    Attempts to add a message to the request using the 'messages' app.

    Raises TypeError for non-HttpRequest objects, and MessageFailure when no
    message storage is configured unless ``fail_silently`` is set.
    """
    if not isinstance(request, HttpRequest):
        raise TypeError("add_message() argument must be an HttpRequest object, "
                        "not '%s'." % request.__class__.__name__)
    if hasattr(request, '_messages'):
        return request._messages.add(level, message, extra_tags)
    if fail_silently:
        return None
    raise MessageFailure('You cannot add messages without installing '
                         'django.contrib.messages.middleware.MessageMiddleware')
def get_messages(request):
    """
    Returns the message storage on the request if it exists, otherwise returns
    an empty list.
    """
    return request._messages if hasattr(request, '_messages') else []
def get_level(request):
    """
    Returns the minimum level of messages to be recorded.

    The default level is the ``MESSAGE_LEVEL`` setting. If this is not found,
    the ``INFO`` level is used.
    """
    # Fall back to a default storage when the middleware hasn't attached one.
    storage = (request._messages if hasattr(request, '_messages')
               else default_storage(request))
    return storage.level
def set_level(request, level):
    """
    Sets the minimum level of messages to be recorded, returning ``True`` if
    the level was recorded successfully.

    If set to ``None``, the default level will be used (see the ``get_level``
    method).
    """
    has_storage = hasattr(request, '_messages')
    if has_storage:
        request._messages.level = level
    return has_storage
def debug(request, message, extra_tags='', fail_silently=False):
    """Adds a message with the ``DEBUG`` level."""
    add_message(request, constants.DEBUG, message,
                extra_tags=extra_tags, fail_silently=fail_silently)
def info(request, message, extra_tags='', fail_silently=False):
    """Adds a message with the ``INFO`` level."""
    add_message(request, constants.INFO, message,
                extra_tags=extra_tags, fail_silently=fail_silently)
def success(request, message, extra_tags='', fail_silently=False):
    """Adds a message with the ``SUCCESS`` level."""
    add_message(request, constants.SUCCESS, message,
                extra_tags=extra_tags, fail_silently=fail_silently)
def warning(request, message, extra_tags='', fail_silently=False):
    """Adds a message with the ``WARNING`` level."""
    add_message(request, constants.WARNING, message,
                extra_tags=extra_tags, fail_silently=fail_silently)
def error(request, message, extra_tags='', fail_silently=False):
    """Adds a message with the ``ERROR`` level."""
    add_message(request, constants.ERROR, message,
                extra_tags=extra_tags, fail_silently=fail_silently)
| mit |
sahiljain/catapult | telemetry/telemetry/decorators.py | 6 | 11985 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=protected-access
import datetime
import functools
import os
import inspect
import types
import warnings
def Cache(obj):
  """Decorator for caching read-only properties.

  Example usage (always returns the same Foo instance):
    @Cache
    def CreateFoo():
      return Foo()

  If CreateFoo() accepts parameters, a separate cached value is maintained
  for each unique parameter combination.

  Cached methods maintain their cache for the lifetime of the /instance/, while
  cached functions maintain their cache for the lifetime of the /module/.
  """
  # inspect.getargspec() was removed in Python 3.11; use getfullargspec()
  # where available, while remaining compatible with Python 2. The lookup is
  # hoisted out of Cacher so it happens once, at decoration time.
  argspec_fn = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
  @functools.wraps(obj)
  def Cacher(*args, **kwargs):
    # Methods (first arg named 'self') cache on the instance so the cache
    # dies with it; plain functions cache on the function object itself.
    cacher = args[0] if argspec_fn(obj).args[:1] == ['self'] else obj
    cacher.__cache = cacher.__cache if hasattr(cacher, '__cache') else {}
    # Key on the callable and the full argument spelling.
    key = str(obj) + str(args) + str(kwargs)
    if key not in cacher.__cache:
      cacher.__cache[key] = obj(*args, **kwargs)
    return cacher.__cache[key]
  return Cacher
class Deprecated(object):
  """Decorator that warns whenever the decorated function is called or the
  decorated class is instantiated, stating the support-removal date."""
  def __init__(self, year, month, day, extra_guidance=''):
    # Date after which the deprecated target is scheduled for removal.
    self._date_of_support_removal = datetime.date(year, month, day)
    self._extra_guidance = extra_guidance
  def _DisplayWarningMessage(self, target):
    # Build and emit the deprecation warning for |target| (function or class).
    target_str = ''
    if isinstance(target, types.FunctionType):
      target_str = 'Function %s' % target.__name__
    else:
      target_str = 'Class %s' % target.__name__
    warnings.warn('%s is deprecated. It will no longer be supported on %s. '
                  'Please remove it or switch to an alternative before '
                  'that time. %s\n'
                  % (target_str,
                     self._date_of_support_removal.strftime('%B %d, %Y'),
                     self._extra_guidance),
                  stacklevel=self._ComputeStackLevel())
  def _ComputeStackLevel(self):
    # Walk up the call stack until the first frame outside this file, so the
    # warning is attributed to the deprecated call site, not this module.
    this_file, _ = os.path.splitext(__file__)
    frame = inspect.currentframe()
    i = 0
    while True:
      filename = frame.f_code.co_filename
      if not filename.startswith(this_file):
        return i
      frame = frame.f_back
      i += 1
  def __call__(self, target):
    # Functions are wrapped directly; classes get their __init__ wrapped so
    # that instantiation triggers the warning.
    if isinstance(target, types.FunctionType):
      @functools.wraps(target)
      def wrapper(*args, **kwargs):
        self._DisplayWarningMessage(target)
        return target(*args, **kwargs)
      return wrapper
    elif inspect.isclass(target):
      original_ctor = target.__init__
      # We have to handle case original_ctor is object.__init__ separately
      # since object.__init__ does not have __module__ defined, which
      # cause functools.wraps() to raise exception.
      if original_ctor == object.__init__:
        def new_ctor(*args, **kwargs):
          self._DisplayWarningMessage(target)
          return original_ctor(*args, **kwargs)
      else:
        @functools.wraps(original_ctor)
        def new_ctor(*args, **kwargs):
          self._DisplayWarningMessage(target)
          return original_ctor(*args, **kwargs)
      target.__init__ = new_ctor
      return target
    else:
      raise TypeError('@Deprecated is only applicable to functions or classes')
def Disabled(*args):
  """Decorator for disabling tests/benchmarks.

  If args are given, the test will be disabled if ANY of the args match the
  browser type, OS name or OS version:
    @Disabled('canary')        # Disabled for canary browsers
    @Disabled('win')           # Disabled on Windows.
    @Disabled('win', 'linux')  # Disabled on both Windows and Linux.
    @Disabled('mavericks')     # Disabled on Mac Mavericks (10.9) only.
    @Disabled('all')           # Unconditionally disabled.
  """
  assert args, (
      "@Disabled(...) requires arguments. Use @Disabled('all') if you want to "
      'unconditionally disable the test.')
  assert not callable(args[0]), 'Please use @Disabled(..).'
  disabled_strings = list(args)
  for disabled_string in disabled_strings:
    # TODO(tonyg): Validate that these strings are recognized.
    assert isinstance(disabled_string, str), '@Disabled accepts a list of strs'
  def _Disabled(func):
    attr_name = DisabledAttributeName(func)
    # Accumulate onto any strings set by earlier @Disabled decorators.
    accumulated = getattr(func, attr_name, set())
    accumulated.update(disabled_strings)
    setattr(func, attr_name, accumulated)
    return func
  return _Disabled
def Enabled(*args):
  """Decorator for enabling tests/benchmarks.

  The test will be enabled if ANY of the args match the browser type, OS name
  or OS version:
    @Enabled('canary')        # Enabled only for canary browsers
    @Enabled('win')           # Enabled only on Windows.
    @Enabled('win', 'linux')  # Enabled only on Windows or Linux.
    @Enabled('mavericks')     # Enabled only on Mac Mavericks (10.9).
  """
  assert args, '@Enabled(..) requires arguments'
  assert not callable(args[0]), 'Please use @Enabled(..).'
  enabled_strings = list(args)
  for enabled_string in enabled_strings:
    # TODO(tonyg): Validate that these strings are recognized.
    assert isinstance(enabled_string, str), '@Enabled accepts a list of strs'
  def _Enabled(func):
    attr_name = EnabledAttributeName(func)
    # Accumulate onto any strings set by earlier @Enabled decorators.
    accumulated = getattr(func, attr_name, set())
    accumulated.update(enabled_strings)
    setattr(func, attr_name, accumulated)
    return func
  return _Enabled
def Owner(emails=None, component=None):
  """Decorator for specifying the owner of a benchmark."""
  help_text = '@Owner(...) requires emails and/or a component'
  assert emails or component, help_text
  if emails:
    assert isinstance(emails, list), 'emails must be a list of strs'
    for e in emails:
      assert isinstance(e, str), 'emails must be a list of strs'
  def _Owner(func):
    owner_attr_name = OwnerAttributeName(func)
    assert inspect.isclass(func), '@Owner(...) can only be used on classes'
    owner_dict = getattr(func, owner_attr_name, {})
    # Each piece of ownership information may only be declared once.
    if emails:
      assert 'emails' not in owner_dict, 'emails can only be set once'
      owner_dict['emails'] = emails
    if component:
      assert 'component' not in owner_dict, 'component can only be set once'
      owner_dict['component'] = component
    setattr(func, owner_attr_name, owner_dict)
    return func
  return _Owner
# TODO(dpranke): Remove if we don't need this.
def Isolated(*args):
  """Decorator for noting that tests must be run in isolation.

  The test will be run by itself (not concurrently with any other tests)
  if ANY of the args match the browser type, OS name, or OS version."""
  def decorate(func):
    if not isinstance(func, types.FunctionType):
      # Classes (and other callables) just get the attribute attached.
      func._isolated_strings = isolated_strings
      return func
    @functools.wraps(func)
    def wrapper(*wrapper_args, **wrapper_kwargs):
      func(*wrapper_args, **wrapper_kwargs)
    wrapper._isolated_strings = isolated_strings
    return wrapper
  # Bare @Isolated (no parentheses): args is the decorated callable itself.
  if len(args) == 1 and callable(args[0]):
    isolated_strings = []
    return decorate(args[0])
  isolated_strings = list(args)
  for isolated_string in isolated_strings:
    # TODO(tonyg): Validate that these strings are recognized.
    assert isinstance(isolated_string, str), 'Isolated accepts a list of strs'
  return decorate
# TODO(nednguyen): Remove this and have call site just use ShouldSkip directly.
def IsEnabled(test, possible_browser):
  """Returns True iff |test| is enabled given the |possible_browser|.

  Use to respect the @Enabled / @Disabled decorators.

  Args:
    test: A function or class that may contain _disabled_strings and/or
      _enabled_strings attributes.
    possible_browser: A PossibleBrowser to check whether |test| may run against.
  """
  skipped, reason = ShouldSkip(test, possible_browser)
  return (not skipped, reason)
def IsBenchmarkEnabled(benchmark, possible_browser):
  """Returns True iff |benchmark| may run against |possible_browser|."""
  if benchmark.ShouldDisable(possible_browser):
    return False
  return IsEnabled(benchmark, possible_browser)[0]
def _TestName(test):
if inspect.ismethod(test):
# On methods, __name__ is "instancemethod", use __func__.__name__ instead.
test = test.__func__
if hasattr(test, '__name__'):
return test.__name__
elif hasattr(test, '__class__'):
return test.__class__.__name__
return str(test)
def DisabledAttributeName(test):
  """Name of the attribute where @Disabled strings are stored for |test|."""
  return '_%s_%s_disabled_strings' % (test.__module__, _TestName(test))
def GetDisabledAttributes(test):
  """Returns the set of strings passed to @Disabled for |test| (may be empty)."""
  return set(getattr(test, DisabledAttributeName(test), ()))
def GetEnabledAttributes(test):
  """Returns the set of strings passed to @Enabled for |test| (may be empty)."""
  return set(getattr(test, EnabledAttributeName(test), ()))
def EnabledAttributeName(test):
  """Name of the attribute where @Enabled strings are stored for |test|."""
  return '_%s_%s_enabled_strings' % (test.__module__, _TestName(test))
def OwnerAttributeName(test):
  """Name of the attribute where @Owner information is stored for |test|."""
  return '_%s_%s_owner' % (test.__module__, _TestName(test))
def GetEmails(test):
  """Returns the owner emails declared via @Owner, or None."""
  return getattr(test, OwnerAttributeName(test), {}).get('emails')
def GetComponent(test):
  """Returns the owner component declared via @Owner, or None."""
  return getattr(test, OwnerAttributeName(test), {}).get('component')
def ShouldSkip(test, possible_browser):
  """Returns whether the test should be skipped and the reason for it."""
  platform_attributes = _PlatformAttributes(possible_browser)
  name = _TestName(test)
  skip = 'Skipping %s (%s) because' % (name, str(test))
  running = 'You are running %r.' % platform_attributes
  disabled_strings = getattr(test, DisabledAttributeName(test), None)
  if disabled_strings is not None:
    if 'all' in disabled_strings:
      return (True, '%s it is unconditionally disabled.' % skip)
    if set(disabled_strings) & set(platform_attributes):
      return (True, '%s it is disabled for %s. %s' %
                    (skip, ' and '.join(disabled_strings), running))
  enabled_strings = getattr(test, EnabledAttributeName(test), None)
  if enabled_strings is not None:
    if 'all' in enabled_strings:
      return False, None  # No arguments to @Enabled means always enable.
    if not set(enabled_strings) & set(platform_attributes):
      return (True, '%s it is only enabled for %s. %s' %
                    (skip, ' or '.join(enabled_strings), running))
  return False, None
def ShouldBeIsolated(test, possible_browser):
  """Returns True iff |test| must not run concurrently with other tests."""
  platform_attributes = _PlatformAttributes(possible_browser)
  if not hasattr(test, '_isolated_strings'):
    return False
  isolated_strings = test._isolated_strings
  if not isolated_strings:
    return True  # No arguments to @Isolated means always isolate.
  return any(entry in platform_attributes for entry in isolated_strings)
def _PlatformAttributes(possible_browser):
"""Returns a list of platform attribute strings."""
attributes = [a.lower() for a in [
possible_browser.browser_type,
possible_browser.platform.GetOSName(),
possible_browser.platform.GetOSVersionName(),
]]
if possible_browser.supports_tab_control:
attributes.append('has tabs')
if 'content-shell' in possible_browser.browser_type:
attributes.append('content-shell')
if possible_browser.browser_type == 'reference':
ref_attributes = []
for attribute in attributes:
if attribute != 'reference':
ref_attributes.append('%s-reference' % attribute)
attributes.extend(ref_attributes)
return attributes
| bsd-3-clause |
axbaretto/beam | sdks/python/.tox/docs/lib/python2.7/site-packages/jinja2/_compat.py | 214 | 2596 | # -*- coding: utf-8 -*-
"""
jinja2._compat
~~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: Copyright 2013 by the Jinja team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
PY2 = sys.version_info[0] == 2  # True when running on Python 2.x.
PYPY = hasattr(sys, 'pypy_translation_info')  # True on the PyPy interpreter.
# Identity function, used below where Python 3 needs no wrapper/decorator.
_identity = lambda x: x
# Alias a common set of names so the rest of the package can be written
# version-agnostically.
if not PY2:
    unichr = chr
    range_type = range
    text_type = str
    string_types = (str,)
    integer_types = (int,)
    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())
    import pickle
    from io import BytesIO, StringIO
    NativeStringIO = StringIO
    def reraise(tp, value, tb=None):
        # Re-raise preserving the given traceback (Python 3 syntax).
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    ifilter = filter
    imap = map
    izip = zip
    intern = sys.intern
    implements_iterator = _identity
    implements_to_string = _identity
    encode_filename = _identity
else:
    unichr = unichr
    text_type = unicode
    range_type = xrange
    string_types = (str, unicode)
    integer_types = (int, long)
    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()
    import cPickle as pickle
    from cStringIO import StringIO as BytesIO, StringIO
    NativeStringIO = BytesIO
    # 'raise tp, value, tb' is a syntax error on Python 3, so it is hidden
    # inside exec() to keep this module importable there.
    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
    from itertools import imap, izip, ifilter
    intern = intern
    def implements_iterator(cls):
        # Python 2 iterators use .next(); alias it from __next__.
        cls.next = cls.__next__
        del cls.__next__
        return cls
    def implements_to_string(cls):
        # Python 2 str() must return bytes; derive it from __unicode__.
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls
    def encode_filename(filename):
        if isinstance(filename, unicode):
            return filename.encode('utf-8')
        return filename
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # The returned throwaway class replaces itself with a real class the
    # moment it is subclassed: its metaclass's __new__ ignores the temporary
    # base and invokes `meta` with the originally requested bases instead.
    class _Bootstrap(type):
        def __new__(mcs, name, _this_bases, namespace):
            return meta(name, bases, namespace)
    return type.__new__(_Bootstrap, 'temporary_class', (), {})
try:
    # Python 3 location.
    from urllib.parse import quote_from_bytes as url_quote
except ImportError:
    # Python 2 fallback.
    from urllib import quote as url_quote
| apache-2.0 |
benjamindeleener/odoo | addons/hr_timesheet_sheet/wizard/hr_timesheet_current.py | 46 | 1421 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.exceptions import UserError
class hr_timesheet_current_open(osv.osv_memory):
    _name = 'hr.timesheet.current.open'
    _description = 'hr.timesheet.current.open'
    def open_timesheet(self, cr, uid, ids, context=None):
        """Return an ir.actions.act_window opening the current user's
        draft/new timesheet sheet(s) whose date range covers today.

        Note: the ``ids`` argument (the wizard's own record ids) is
        immediately overwritten by the sheet search below.
        """
        ts = self.pool.get('hr_timesheet_sheet.sheet')
        if context is None:
            context = {}
        view_type = 'form,tree'
        # Draft/new sheets belonging to the current user that include today.
        ids = ts.search(cr, uid, [('user_id','=',uid),('state','in',('draft','new')),('date_from','<=',time.strftime('%Y-%m-%d')), ('date_to','>=',time.strftime('%Y-%m-%d'))], context=context)
        if len(ids) > 1:
            # Several candidate sheets: open a list restricted to them.
            view_type = 'tree,form'
            domain = "[('id','in',["+','.join(map(str, ids))+"]),('user_id', '=', uid)]"
        elif len(ids)==1:
            domain = "[('user_id', '=', uid)]"
        else:
            # NOTE(review): identical to the single-sheet branch above;
            # kept as-is to preserve existing behavior.
            domain = "[('user_id', '=', uid)]"
        value = {
            'domain': domain,
            'name': _('Open Timesheet'),
            'view_type': 'form',
            'view_mode': view_type,
            'res_model': 'hr_timesheet_sheet.sheet',
            'view_id': False,
            'type': 'ir.actions.act_window'
        }
        if len(ids) == 1:
            # Jump straight to the single matching sheet.
            value['res_id'] = ids[0]
        return value
| gpl-3.0 |
leppa/home-assistant | homeassistant/components/wink/switch.py | 12 | 2085 | """Support for Wink switches."""
import logging
import pywink
from homeassistant.helpers.entity import ToggleEntity
from . import DOMAIN, WinkDevice
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Wink platform."""
    seen_ids = hass.data[DOMAIN]["unique_ids"]
    # Switches, powerstrips, sprinklers and binary switch groups all share
    # the same toggle entity; process them in that order.
    devices = (
        pywink.get_switches()
        + pywink.get_powerstrips()
        + pywink.get_sprinklers()
        + pywink.get_binary_switch_groups()
    )
    for device in devices:
        uid = device.object_id() + device.name()
        if uid not in seen_ids:
            add_entities([WinkToggleDevice(device, hass)])
class WinkToggleDevice(WinkDevice, ToggleEntity):
    """Representation of a Wink toggle device."""

    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        self.hass.data[DOMAIN]["entities"]["switch"].append(self)

    @property
    def is_on(self):
        """Return true if device is on."""
        return self.wink.state()

    def turn_on(self, **kwargs):
        """Turn the device on."""
        self.wink.set_state(True)

    def turn_off(self, **kwargs):
        """Turn the device off."""
        self.wink.set_state(False)

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        attributes = super().device_state_attributes
        try:
            # Not every pywink device exposes last_event().
            last_event = self.wink.last_event()
        except AttributeError:
            return attributes
        if last_event is not None:
            attributes["last_event"] = last_event
        return attributes
| apache-2.0 |
wkschwartz/django | tests/runtests.py | 2 | 23029 | #!/usr/bin/env python
import argparse
import atexit
import copy
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import warnings
try:
import django
except ImportError as e:
raise RuntimeError(
'Django module not found, reference tests/README.rst for instructions.'
) from e
else:
from django.apps import apps
from django.conf import settings
from django.db import connection, connections
from django.test import TestCase, TransactionTestCase
from django.test.runner import default_test_processes
from django.test.selenium import SeleniumTestCaseBase
from django.test.utils import NullTimeKeeper, TimeKeeper, get_runner
from django.utils.deprecation import (
RemovedInDjango40Warning, RemovedInDjango41Warning,
)
from django.utils.log import DEFAULT_LOGGING
from django.utils.version import PY37
# MySQLdb is optional; only filter its warnings when the driver is installed.
try:
    import MySQLdb
except ImportError:
    pass
else:
    # Ignore informational warnings from QuerySet.explain().
    warnings.filterwarnings('ignore', r'\(1003, *', category=MySQLdb.Warning)
# Make deprecation warnings errors to ensure no usage of deprecated features.
warnings.simplefilter("error", RemovedInDjango40Warning)
warnings.simplefilter('error', RemovedInDjango41Warning)
# Make resource and runtime warning errors to ensure no usage of error prone
# patterns.
warnings.simplefilter("error", ResourceWarning)
warnings.simplefilter("error", RuntimeWarning)
# Ignore known warnings in test dependencies.
warnings.filterwarnings("ignore", "'U' mode is deprecated", DeprecationWarning, module='docutils.io')
# RemovedInDjango41Warning: Ignore MemcachedCache deprecation warning.
warnings.filterwarnings(
    'ignore',
    'MemcachedCache is deprecated',
    category=RemovedInDjango41Warning,
)
RUNTESTS_DIR = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')
# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix='django_')
# Set the TMPDIR environment variable in addition to tempfile.tempdir
# so that children processes inherit it.
tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR
# Removing the temporary TMPDIR.
atexit.register(shutil.rmtree, TMPDIR)
# Subdirectories of RUNTESTS_DIR that are not importable test apps.
SUBDIRS_TO_SKIP = [
    'data',
    'import_error_package',
    'test_runner_apps',
]
# Apps installed for every test run, regardless of the labels requested.
ALWAYS_INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sites',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.admin.apps.SimpleAdminConfig',
    'django.contrib.staticfiles',
]
# Middleware enabled for every test run.
ALWAYS_MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
]
# Need to add the associated contrib app to INSTALLED_APPS in some cases to
# avoid "RuntimeError: Model class X doesn't declare an explicit app_label
# and isn't in an application in INSTALLED_APPS."
CONTRIB_TESTS_TO_APPS = {
    'deprecation': ['django.contrib.flatpages', 'django.contrib.redirects'],
    'flatpages_tests': ['django.contrib.flatpages'],
    'redirects_tests': ['django.contrib.redirects'],
}
def get_test_modules():
    """Return (modpath, module_name) pairs for all discoverable test apps.

    A test app is any package (directory with __init__.py) directly under
    RUNTESTS_DIR, plus the nested apps under gis_tests when a GIS-enabled
    database backend is configured.
    """
    modules = []
    discovery_paths = [(None, RUNTESTS_DIR)]
    # Work on a copy of the module-level skip list: the original code appended
    # 'gis_tests' to SUBDIRS_TO_SKIP itself, adding a duplicate entry on every
    # call and permanently mutating a module "constant".
    subdirs_to_skip = list(SUBDIRS_TO_SKIP)
    if connection.features.gis_enabled:
        # GIS tests are in nested apps
        discovery_paths.append(('gis_tests', os.path.join(RUNTESTS_DIR, 'gis_tests')))
    else:
        subdirs_to_skip.append('gis_tests')
    for modpath, dirpath in discovery_paths:
        for f in os.scandir(dirpath):
            if ('.' not in f.name and
                    os.path.basename(f.name) not in subdirs_to_skip and
                    not f.is_file() and
                    os.path.exists(os.path.join(f.path, '__init__.py'))):
                modules.append((modpath, f.name))
    return modules
def get_installed():
    """Return the dotted names of all currently installed app configs."""
    installed = []
    for app_config in apps.get_app_configs():
        installed.append(app_config.name)
    return installed
def setup(verbosity, test_labels, parallel, start_at, start_after):
    """Configure settings and INSTALLED_APPS for a test run.

    Return a dict of the original settings values so teardown() can restore
    them afterwards. Must be called before running any tests; calls
    django.setup() as a side effect.
    """
    # Reduce the given test labels to just the app module path.
    test_labels_set = set()
    for label in test_labels:
        bits = label.split('.')[:1]
        test_labels_set.add('.'.join(bits))
    if verbosity >= 1:
        msg = "Testing against Django installed in '%s'" % os.path.dirname(django.__file__)
        max_parallel = default_test_processes() if parallel == 0 else parallel
        if max_parallel > 1:
            msg += " with up to %d processes" % max_parallel
        print(msg)
    # Force declaring available_apps in TransactionTestCase for faster tests.
    def no_available_apps(self):
        raise Exception("Please define available_apps in TransactionTestCase "
                        "and its subclasses.")
    TransactionTestCase.available_apps = property(no_available_apps)
    TestCase.available_apps = None
    # Snapshot of the settings this function overrides; returned for teardown().
    state = {
        'INSTALLED_APPS': settings.INSTALLED_APPS,
        'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
        'TEMPLATES': settings.TEMPLATES,
        'LANGUAGE_CODE': settings.LANGUAGE_CODE,
        'STATIC_URL': settings.STATIC_URL,
        'STATIC_ROOT': settings.STATIC_ROOT,
        'MIDDLEWARE': settings.MIDDLEWARE,
    }
    # Redirect some settings for the duration of these tests.
    settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
    settings.ROOT_URLCONF = 'urls'
    settings.STATIC_URL = '/static/'
    settings.STATIC_ROOT = os.path.join(TMPDIR, 'static')
    settings.TEMPLATES = [{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    }]
    settings.LANGUAGE_CODE = 'en'
    settings.SITE_ID = 1
    settings.MIDDLEWARE = ALWAYS_MIDDLEWARE
    settings.MIGRATION_MODULES = {
        # This lets us skip creating migrations for the test models as many of
        # them depend on one of the following contrib applications.
        'auth': None,
        'contenttypes': None,
        'sessions': None,
    }
    log_config = copy.deepcopy(DEFAULT_LOGGING)
    # Filter out non-error logging so we don't have to capture it in lots of
    # tests.
    log_config['loggers']['django']['level'] = 'ERROR'
    settings.LOGGING = log_config
    settings.SILENCED_SYSTEM_CHECKS = [
        'fields.W342',  # ForeignKey(unique=True) -> OneToOneField
        'fields.W903',  # NullBooleanField deprecated.
    ]
    # Load all the ALWAYS_INSTALLED_APPS.
    django.setup()
    # It would be nice to put this validation earlier but it must come after
    # django.setup() so that connection.features.gis_enabled can be accessed
    # without raising AppRegistryNotReady when running gis_tests in isolation
    # on some backends (e.g. PostGIS).
    if 'gis_tests' in test_labels_set and not connection.features.gis_enabled:
        print('Aborting: A GIS database backend is required to run gis_tests.')
        sys.exit(1)
    def _module_match_label(module_label, label):
        # Exact or ancestor match.
        return module_label == label or module_label.startswith(label + '.')
    # Load all the test model apps.
    test_modules = get_test_modules()
    found_start = not (start_at or start_after)
    installed_app_names = set(get_installed())
    for modpath, module_name in test_modules:
        if modpath:
            module_label = modpath + '.' + module_name
        else:
            module_label = module_name
        # Honor --start-at/--start-after by skipping modules until the
        # requested starting point is reached (modules are ordered).
        if not found_start:
            if start_at and _module_match_label(module_label, start_at):
                found_start = True
            elif start_after and _module_match_label(module_label, start_after):
                found_start = True
                continue
            else:
                continue
        # if the module (or an ancestor) was named on the command line, or
        # no modules were named (i.e., run all), import
        # this module and add it to INSTALLED_APPS.
        module_found_in_labels = not test_labels or any(
            _module_match_label(module_label, label) for label in test_labels_set
        )
        if module_name in CONTRIB_TESTS_TO_APPS and module_found_in_labels:
            for contrib_app in CONTRIB_TESTS_TO_APPS[module_name]:
                if contrib_app not in settings.INSTALLED_APPS:
                    settings.INSTALLED_APPS.append(contrib_app)
        if module_found_in_labels and module_label not in installed_app_names:
            if verbosity >= 2:
                print("Importing application %s" % module_name)
            settings.INSTALLED_APPS.append(module_label)
    # Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
    # @override_settings(INSTALLED_APPS=...) on all test cases.
    gis = 'django.contrib.gis'
    if connection.features.gis_enabled and gis not in settings.INSTALLED_APPS:
        if verbosity >= 2:
            print("Importing application %s" % gis)
        settings.INSTALLED_APPS.append(gis)
    apps.set_installed_apps(settings.INSTALLED_APPS)
    # Set an environment variable that other code may consult to see if
    # Django's own test suite is running.
    os.environ['RUNNING_DJANGOS_TEST_SUITE'] = 'true'
    return state
def teardown(state):
    """Undo the changes made by setup() and clean up process-global state."""
    # Restore the old settings.
    for name in state:
        setattr(settings, name, state[name])
    # Discard the multiprocessing.util finalizer that tries to remove a
    # temporary directory that's already removed by this script's
    # atexit.register(shutil.rmtree, TMPDIR) handler. Prevents
    # FileNotFoundError at the end of a test run (#27890).
    from multiprocessing.util import _finalizer_registry
    _finalizer_registry.pop((-100, 0), None)
    del os.environ['RUNNING_DJANGOS_TEST_SUITE']
def actual_test_processes(parallel):
    """Resolve --parallel=0 ("auto-detect") into a concrete process count."""
    if parallel:
        return parallel
    # Auto-detection doesn't work before django.setup() on some databases,
    # and parallel runs require every connection to support database cloning.
    if all(conn.features.can_clone_databases for conn in connections.all()):
        return default_test_processes()
    return 1
class ActionSelenium(argparse.Action):
    """
    Validate the comma-separated list of requested browsers.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        requested = values.split(',')
        for name in requested:
            try:
                SeleniumTestCaseBase.import_webdriver(name)
            except ImportError:
                raise argparse.ArgumentError(
                    self,
                    "Selenium browser specification '%s' is not valid." % name,
                )
        setattr(namespace, self.dest, requested)
def django_tests(verbosity, interactive, failfast, keepdb, reverse,
                 test_labels, debug_sql, parallel, tags, exclude_tags,
                 test_name_patterns, start_at, start_after, pdb, buffer,
                 timing):
    """Set up the environment, run the suite, and return the failure count."""
    state = setup(verbosity, test_labels, parallel, start_at, start_after)
    # Fall back to Django's stock runner unless the settings name another one.
    if not hasattr(settings, 'TEST_RUNNER'):
        settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
    runner_options = {
        'verbosity': verbosity,
        'interactive': interactive,
        'failfast': failfast,
        'keepdb': keepdb,
        'reverse': reverse,
        'debug_sql': debug_sql,
        'parallel': actual_test_processes(parallel),
        'tags': tags,
        'exclude_tags': exclude_tags,
        'test_name_patterns': test_name_patterns,
        'pdb': pdb,
        'buffer': buffer,
        'timing': timing,
    }
    TestRunner = get_runner(settings)
    test_runner = TestRunner(**runner_options)
    failures = test_runner.run_tests(test_labels or get_installed())
    teardown(state)
    return failures
def get_subprocess_args(options):
    """Translate parsed options back into argv for a child runtests process."""
    args = [sys.executable, __file__, '--settings=%s' % options.settings]
    if options.failfast:
        args.append('--failfast')
    if options.verbosity:
        args.append('--verbosity=%s' % options.verbosity)
    if not options.interactive:
        args.append('--noinput')
    if options.tags:
        args.append('--tag=%s' % options.tags)
    if options.exclude_tags:
        args.append('--exclude_tag=%s' % options.exclude_tags)
    return args
def bisect_tests(bisection_label, options, test_labels, parallel, start_at, start_after):
    """Binary-search the suite for a test that fails when run together with
    *bisection_label*. Each half runs in a child process; a nonzero
    returncode marks that half as failing."""
    state = setup(options.verbosity, test_labels, parallel, start_at, start_after)
    test_labels = test_labels or get_installed()
    print('***** Bisecting test suite: %s' % ' '.join(test_labels))
    # Make sure the bisection point isn't in the test list
    # Also remove tests that need to be run in specific combinations
    for label in [bisection_label, 'model_inheritance_same_model_name']:
        try:
            test_labels.remove(label)
        except ValueError:
            pass
    subprocess_args = get_subprocess_args(options)
    iteration = 1
    while len(test_labels) > 1:
        midpoint = len(test_labels) // 2
        # Run each half together with the bisection label appended.
        test_labels_a = test_labels[:midpoint] + [bisection_label]
        test_labels_b = test_labels[midpoint:] + [bisection_label]
        print('***** Pass %da: Running the first half of the test suite' % iteration)
        print('***** Test labels: %s' % ' '.join(test_labels_a))
        failures_a = subprocess.run(subprocess_args + test_labels_a)
        print('***** Pass %db: Running the second half of the test suite' % iteration)
        print('***** Test labels: %s' % ' '.join(test_labels_b))
        print('')
        failures_b = subprocess.run(subprocess_args + test_labels_b)
        if failures_a.returncode and not failures_b.returncode:
            print("***** Problem found in first half. Bisecting again...")
            iteration += 1
            test_labels = test_labels_a[:-1]
        elif failures_b.returncode and not failures_a.returncode:
            print("***** Problem found in second half. Bisecting again...")
            iteration += 1
            test_labels = test_labels_b[:-1]
        elif failures_a.returncode and failures_b.returncode:
            print("***** Multiple sources of failure found")
            break
        else:
            print("***** No source of failure found... try pair execution (--pair)")
            break
    if len(test_labels) == 1:
        print("***** Source of error: %s" % test_labels[0])
    teardown(state)
def paired_tests(paired_test, options, test_labels, parallel, start_at, start_after):
    """Run every test label together with *paired_test* to find problem pairs."""
    state = setup(options.verbosity, test_labels, parallel, start_at, start_after)
    test_labels = test_labels or get_installed()
    print('***** Trying paired execution')
    # The constant half of the pair must not also appear in the label list;
    # tests that only work in specific combinations are excluded as well.
    for excluded in (paired_test, 'model_inheritance_same_model_name'):
        try:
            test_labels.remove(excluded)
        except ValueError:
            pass
    subprocess_args = get_subprocess_args(options)
    total = len(test_labels)
    for position, label in enumerate(test_labels, start=1):
        print('***** %d of %d: Check test pairing with %s' % (
            position, total, label))
        if subprocess.call(subprocess_args + [label, paired_test]):
            print('***** Found problem pair with %s' % label)
            return
    print('***** No problem pair found')
    teardown(state)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run the Django test suite.")
    parser.add_argument(
        'modules', nargs='*', metavar='module',
        help='Optional path(s) to test modules; e.g. "i18n" or '
             '"i18n.tests.TranslationTests.test_lazy_objects".',
    )
    parser.add_argument(
        '-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
        help='Verbosity level; 0=minimal output, 1=normal output, 2=all output',
    )
    parser.add_argument(
        '--noinput', action='store_false', dest='interactive',
        help='Tells Django to NOT prompt the user for input of any kind.',
    )
    parser.add_argument(
        '--failfast', action='store_true',
        help='Tells Django to stop running the test suite after first failed test.',
    )
    parser.add_argument(
        '--keepdb', action='store_true',
        help='Tells Django to preserve the test database between runs.',
    )
    parser.add_argument(
        '--settings',
        help='Python path to settings module, e.g. "myproject.settings". If '
             'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
             'environment variable or "test_sqlite" will be used.',
    )
    parser.add_argument(
        '--bisect',
        help='Bisect the test suite to discover a test that causes a test '
             'failure when combined with the named test.',
    )
    parser.add_argument(
        '--pair',
        help='Run the test suite in pairs with the named test to find problem pairs.',
    )
    parser.add_argument(
        '--reverse', action='store_true',
        help='Sort test suites and test cases in opposite order to debug '
             'test side effects not apparent with normal execution lineup.',
    )
    parser.add_argument(
        '--selenium', action=ActionSelenium, metavar='BROWSERS',
        help='A comma-separated list of browsers to run the Selenium tests against.',
    )
    parser.add_argument(
        '--headless', action='store_true',
        help='Run selenium tests in headless mode, if the browser supports the option.',
    )
    parser.add_argument(
        '--selenium-hub',
        help='A URL for a selenium hub instance to use in combination with --selenium.',
    )
    parser.add_argument(
        '--external-host', default=socket.gethostname(),
        help='The external host that can be reached by the selenium hub instance when running Selenium '
             'tests via Selenium Hub.',
    )
    parser.add_argument(
        '--debug-sql', action='store_true',
        help='Turn on the SQL query logger within tests.',
    )
    parser.add_argument(
        '--parallel', nargs='?', default=0, type=int,
        const=default_test_processes(), metavar='N',
        help='Run tests using up to N parallel processes.',
    )
    parser.add_argument(
        '--tag', dest='tags', action='append',
        help='Run only tests with the specified tags. Can be used multiple times.',
    )
    parser.add_argument(
        '--exclude-tag', dest='exclude_tags', action='append',
        help='Do not run tests with the specified tag. Can be used multiple times.',
    )
    parser.add_argument(
        '--start-after', dest='start_after',
        help='Run tests starting after the specified top-level module.',
    )
    parser.add_argument(
        '--start-at', dest='start_at',
        help='Run tests starting at the specified top-level module.',
    )
    parser.add_argument(
        '--pdb', action='store_true',
        help='Runs the PDB debugger on error or failure.'
    )
    parser.add_argument(
        '-b', '--buffer', action='store_true',
        help='Discard output of passing tests.',
    )
    parser.add_argument(
        '--timing', action='store_true',
        help='Output timings, including database set up and total run time.',
    )
    # unittest's -k option requires Python 3.7+.
    if PY37:
        parser.add_argument(
            '-k', dest='test_name_patterns', action='append',
            help=(
                'Only run test methods and classes matching test name pattern. '
                'Same as unittest -k option. Can be used multiple times.'
            ),
        )
    options = parser.parse_args()
    # Sanity-check option combinations before doing any heavy setup.
    using_selenium_hub = options.selenium and options.selenium_hub
    if options.selenium_hub and not options.selenium:
        parser.error('--selenium-hub and --external-host require --selenium to be used.')
    if using_selenium_hub and not options.external_host:
        parser.error('--selenium-hub and --external-host must be used together.')
    # Allow including a trailing slash on app_labels for tab completion convenience
    options.modules = [os.path.normpath(labels) for labels in options.modules]
    mutually_exclusive_options = [options.start_at, options.start_after, options.modules]
    enabled_module_options = [bool(option) for option in mutually_exclusive_options].count(True)
    if enabled_module_options > 1:
        print('Aborting: --start-at, --start-after, and test labels are mutually exclusive.')
        sys.exit(1)
    for opt_name in ['start_at', 'start_after']:
        opt_val = getattr(options, opt_name)
        if opt_val:
            if '.' in opt_val:
                print('Aborting: --%s must be a top-level module.' % opt_name.replace('_', '-'))
                sys.exit(1)
            setattr(options, opt_name, os.path.normpath(opt_val))
    if options.settings:
        os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
    else:
        os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_sqlite')
        options.settings = os.environ['DJANGO_SETTINGS_MODULE']
    if options.selenium:
        # Selenium tests are opt-in via the 'selenium' tag.
        if not options.tags:
            options.tags = ['selenium']
        elif 'selenium' not in options.tags:
            options.tags.append('selenium')
        if options.selenium_hub:
            SeleniumTestCaseBase.selenium_hub = options.selenium_hub
            SeleniumTestCaseBase.external_host = options.external_host
        SeleniumTestCaseBase.headless = options.headless
        SeleniumTestCaseBase.browsers = options.selenium
    if options.bisect:
        bisect_tests(
            options.bisect, options, options.modules, options.parallel,
            options.start_at, options.start_after,
        )
    elif options.pair:
        paired_tests(
            options.pair, options, options.modules, options.parallel,
            options.start_at, options.start_after,
        )
    else:
        time_keeper = TimeKeeper() if options.timing else NullTimeKeeper()
        with time_keeper.timed('Total run'):
            failures = django_tests(
                options.verbosity, options.interactive, options.failfast,
                options.keepdb, options.reverse, options.modules,
                options.debug_sql, options.parallel, options.tags,
                options.exclude_tags,
                getattr(options, 'test_name_patterns', None),
                options.start_at, options.start_after, options.pdb, options.buffer,
                options.timing,
            )
        time_keeper.print_results()
        # Non-zero exit code signals failure to CI.
        if failures:
            sys.exit(1)
| bsd-3-clause |
matthiasdiener/spack | var/spack/repos/builtin/packages/perl-sub-name/package.py | 5 | 1543 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlSubName(PerlPackage):
    """Name or rename a sub"""

    homepage = "http://search.cpan.org/~ether/Sub-Name-0.21/lib/Sub/Name.pm"
    url = "http://search.cpan.org/CPAN/authors/id/E/ET/ETHER/Sub-Name-0.21.tar.gz"

    # The checksum is the MD5 digest of the release tarball.
    version('0.21', '7e7a181e30b3249d0b81585f55e36621')
| lgpl-2.1 |
samrussell/ryu | ryu/tests/integrated/tester.py | 63 | 7221 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import sys
import logging
import itertools
from ryu import utils
from ryu.lib import mac
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import handler
from ryu.controller import dpset
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
LOG = logging.getLogger(__name__)

# Message templates used by TestFlowBase's progress/result logging below.
LOG_TEST_START = 'TEST_START: %s'
LOG_TEST_RESULTS = 'TEST_RESULTS:'
LOG_TEST_FINISH = 'TEST_FINISHED: Completed=[%s] (OK=%s NG=%s SKIP=%s)'
LOG_TEST_UNSUPPORTED = 'SKIP (unsupported)'
class TestFlowBase(app_manager.RyuApp):
    """Base class for OpenFlow flow-mod integration tests.

    To run the tests is required for the following pair of functions.
        1. test_<test name>()
            To send flows to switch.
        2. verify_<test name>() or _verify_default()
            To check flows of switch.
    """

    _CONTEXTS = {'dpset': dpset.DPSet}

    def __init__(self, *args, **kwargs):
        super(TestFlowBase, self).__init__(*args, **kwargs)
        self.pending = []    # names of test_* methods not yet run
        self.results = {}    # test name -> True, error string, or skip marker
        self.current = None  # name of the test currently executing
        self.unclear = 0     # number of tests without a final verdict
        for t in dir(self):
            if t.startswith("test_"):
                self.pending.append(t)
        self.pending.sort(reverse=True)
        self.unclear = len(self.pending)

    def delete_all_flows(self, dp):
        # Build a wildcard-everything flow-mod delete for the datapath's
        # OpenFlow version (1.0 or 1.2) and send it.
        if dp.ofproto == ofproto_v1_0:
            match = dp.ofproto_parser.OFPMatch(dp.ofproto.OFPFW_ALL,
                                               0, 0, 0, 0, 0,
                                               0, 0, 0, 0, 0, 0, 0)
            m = dp.ofproto_parser.OFPFlowMod(dp, match, 0,
                                             dp.ofproto.OFPFC_DELETE,
                                             0, 0, 0, 0,
                                             dp.ofproto.OFPP_NONE, 0, None)
        elif dp.ofproto == ofproto_v1_2:
            match = dp.ofproto_parser.OFPMatch()
            m = dp.ofproto_parser.OFPFlowMod(dp, 0, 0, dp.ofproto.OFPTT_ALL,
                                             dp.ofproto.OFPFC_DELETE,
                                             0, 0, 0, 0xffffffff,
                                             dp.ofproto.OFPP_ANY,
                                             dp.ofproto.OFPG_ANY,
                                             0, match, [])
        dp.send_msg(m)

    def send_flow_stats(self, dp):
        # Request all flow entries; the reply triggers run_verify().
        if dp.ofproto == ofproto_v1_0:
            match = dp.ofproto_parser.OFPMatch(dp.ofproto.OFPFW_ALL,
                                               0, 0, 0, 0, 0,
                                               0, 0, 0, 0, 0, 0, 0)
            m = dp.ofproto_parser.OFPFlowStatsRequest(dp, 0, match,
                                                      0, dp.ofproto.OFPP_NONE)
        elif dp.ofproto == ofproto_v1_2:
            match = dp.ofproto_parser.OFPMatch()
            m = dp.ofproto_parser.OFPFlowStatsRequest(dp, dp.ofproto.OFPTT_ALL,
                                                      dp.ofproto.OFPP_ANY,
                                                      dp.ofproto.OFPG_ANY,
                                                      0, 0, match)
        dp.send_msg(m)

    def verify_default(self, dp, stats):
        # Fallback used when a test defines no verify_<name>() method;
        # the returned string is recorded as the test's failure result.
        return 'function %s() is not found.' % ("verify" + self.current[4:], )

    def start_next_test(self, dp):
        # Clear switch state, then pop and run the next pending test (or
        # print the summary when none remain). Unsupported tests are
        # recorded as skipped and the next one is tried recursively.
        self.delete_all_flows(dp)
        dp.send_barrier()
        if len(self.pending):
            t = self.pending.pop()
            if self.is_supported(t):
                LOG.info(LOG_TEST_START, t)
                self.current = t
                getattr(self, t)(dp)
                dp.send_barrier()
                self.send_flow_stats(dp)
            else:
                self.results[t] = LOG_TEST_UNSUPPORTED
                self.unclear -= 1
                self.start_next_test(dp)
        else:
            self.print_results()

    def print_results(self):
        # NOTE(review): uses the literal "TEST_RESULTS:" rather than the
        # LOG_TEST_RESULTS constant defined above.
        LOG.info("TEST_RESULTS:")
        ok = 0
        ng = 0
        skip = 0
        for t in sorted(self.results.keys()):
            if self.results[t] is True:
                ok += 1
            elif self.results[t] == LOG_TEST_UNSUPPORTED:
                skip += 1
            else:
                ng += 1
            LOG.info('    %s: %s', t, self.results[t])
        LOG.info(LOG_TEST_FINISH, self.unclear == 0, ok, ng, skip)

    @handler.set_ev_cls(ofp_event.EventOFPFlowStatsReply,
                        handler.MAIN_DISPATCHER)
    def flow_reply_handler(self, ev):
        self.run_verify(ev)

    @handler.set_ev_cls(ofp_event.EventOFPStatsReply,
                        handler.MAIN_DISPATCHER)
    def stats_reply_handler(self, ev):
        self.run_verify(ev)

    def run_verify(self, ev):
        # Dispatch the stats reply to verify_<current test>() if defined,
        # otherwise to verify_default(); record the verdict and continue.
        msg = ev.msg
        dp = msg.datapath
        verify_func = self.verify_default
        v = "verify" + self.current[4:]
        if hasattr(self, v):
            verify_func = getattr(self, v)
        result = verify_func(dp, msg.body)
        if result is True:
            self.unclear -= 1
        self.results[self.current] = result
        self.start_next_test(dp)

    @handler.set_ev_cls(dpset.EventDP)
    def handler_datapath(self, ev):
        # A switch connected: kick off the test sequence.
        if ev.enter:
            self.start_next_test(ev.dp)

    @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
    def barrier_replay_handler(self, ev):
        pass

    # --- Address conversion helpers for use by concrete tests. ---

    def haddr_to_str(self, addr):
        return mac.haddr_to_str(addr)

    def haddr_to_bin(self, string):
        return mac.haddr_to_bin(string)

    def haddr_masked(self, haddr_bin, mask_bin):
        return mac.haddr_bitand(haddr_bin, mask_bin)

    def ipv4_to_str(self, integre):
        # Convert a 32-bit integer to dotted-quad notation.
        ip_list = [str((integre >> (24 - (n * 8)) & 255)) for n in range(4)]
        return '.'.join(ip_list)

    def ipv4_to_int(self, string):
        # Convert dotted-quad notation to a 32-bit integer.
        ip = string.split('.')
        assert len(ip) == 4
        i = 0
        for b in ip:
            b = int(b)
            i = (i << 8) | b
        return i

    def ipv4_masked(self, ip_int, mask_int):
        return ip_int & mask_int

    def ipv6_to_str(self, integres):
        # Each element of *integres* is one 16-bit group of the address.
        return ':'.join(hex(x)[2:] for x in integres)

    def ipv6_to_int(self, string):
        # Expects a fully expanded (8-group) IPv6 address.
        ip = string.split(':')
        assert len(ip) == 8
        return [int(x, 16) for x in ip]

    def ipv6_masked(self, ipv6_int, mask_int):
        # Python 2 only: itertools.izip does not exist on Python 3.
        return [x & y for (x, y) in
                itertools.izip(ipv6_int, mask_int)]

    def is_supported(self, t):
        # Subclasses may override to skip tests unsupported by the switch.
        return True
| apache-2.0 |
kittiu/odoo | addons/account_payment/__openerp__.py | 261 | 2925 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Suppliers Payment Management',
'version': '1.1',
'author': 'OpenERP SA',
'category': 'Accounting & Finance',
'description': """
Module to manage the payment of your supplier invoices.
=======================================================
This module allows you to create and manage your payment orders, with purposes to
---------------------------------------------------------------------------------
* serve as base for an easy plug-in of various automated payment mechanisms.
* provide a more efficient way to manage invoice payment.
Warning:
~~~~~~~~
The confirmation of a payment order does _not_ create accounting entries, it just
records the fact that you gave your payment order to your bank. The booking of
your order must be encoded as usual through a bank statement. Indeed, it's only
when you get the confirmation from your bank that your order has been accepted
that you can book it in your accounting. To help you with that operation, you
have a new option to import payment orders as bank statement lines.
""",
'depends': ['account','account_voucher'],
'data': [
'security/account_payment_security.xml',
'security/ir.model.access.csv',
'wizard/account_payment_pay_view.xml',
'wizard/account_payment_populate_statement_view.xml',
'wizard/account_payment_create_order_view.xml',
'account_payment_view.xml',
'account_payment_workflow.xml',
'account_payment_sequence.xml',
'account_payment_report.xml',
'views/report_paymentorder.xml',
],
'demo': ['account_payment_demo.xml'],
'test': [
'test/account_payment_demo.yml',
'test/cancel_payment_order.yml',
'test/payment_order_process.yml',
'test/account_payment_report.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Keisuke69/libcloud | libcloud/loadbalancer/drivers/cloudstack.py | 1 | 4800 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.common.cloudstack import CloudStackConnection, \
CloudStackDriverMixIn
from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm
from libcloud.loadbalancer.base import DEFAULT_ALGORITHM
from libcloud.loadbalancer.types import State, LibcloudLBImmutableError
from libcloud.utils import reverse_dict
class CloudStackLBDriver(CloudStackDriverMixIn, Driver):
"""Driver for CloudStack load balancers."""
api_name = 'cloudstack_lb'
_VALUE_TO_ALGORITHM_MAP = {
'roundrobin': Algorithm.ROUND_ROBIN,
'leastconn': Algorithm.LEAST_CONNECTIONS
}
_ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)
LB_STATE_MAP = {
'Active': State.RUNNING,
}
def list_protocols(self):
"""We don't actually have any protocol awareness beyond TCP."""
return [ 'tcp' ]
def list_balancers(self):
balancers = self._sync_request('listLoadBalancerRules')
balancers = balancers.get('loadbalancerrule', [])
return [self._to_balancer(balancer) for balancer in balancers]
def get_balancer(self, balancer_id):
balancer = self._sync_request('listLoadBalancerRules', id=balancer_id)
balancer = balancer.get('loadbalancerrule', [])
if not balancer:
raise Exception("no such load balancer: " + str(balancer_id))
return self._to_balancer(balancer[0])
def create_balancer(self, name, members, protocol='http', port=80,
algorithm=DEFAULT_ALGORITHM, location=None,
private_port=None):
if location is None:
locations = self._sync_request('listZones')
location = locations['zone'][0]['id']
else:
location = location.id
if private_port is None:
private_port = port
result = self._async_request('associateIpAddress', zoneid=location)
public_ip = result['ipaddress']
result = self._sync_request('createLoadBalancerRule',
algorithm=self._ALGORITHM_TO_VALUE_MAP[algorithm],
name=name,
privateport=private_port,
publicport=port,
publicipid=public_ip['id'],
)
balancer = self._to_balancer(result['loadbalancer'])
for member in members:
balancer.attach_member(member)
return balancer
def destroy_balancer(self, balancer):
self._async_request('deleteLoadBalancerRule', id=balancer.id)
self._async_request('disassociateIpAddress',
id=balancer.ex_public_ip_id)
def balancer_attach_member(self, balancer, member):
member.port = balancer.ex_private_port
self._async_request('assignToLoadBalancerRule', id=balancer.id,
virtualmachineids=member.id)
return True
def balancer_detach_member(self, balancer, member):
self._async_request('removeFromLoadBalancerRule', id=balancer.id,
virtualmachineids=member.id)
return True
def balancer_list_members(self, balancer):
members = self._sync_request('listLoadBalancerRuleInstances',
id=balancer.id)
members = members['loadbalancerruleinstance']
return [self._to_member(m, balancer.ex_private_port) for m in members]
def _to_balancer(self, obj):
    """Convert a CloudStack load-balancer-rule dict into a LoadBalancer."""
    state = self.LB_STATE_MAP.get(obj['state'], State.UNKNOWN)
    balancer = LoadBalancer(id=obj['id'],
                            name=obj['name'],
                            state=state,
                            ip=obj['publicip'],
                            port=obj['publicport'],
                            driver=self.connection.driver)
    # CloudStack-specific extras consumed by the other driver methods.
    balancer.ex_private_port = obj['privateport']
    balancer.ex_public_ip_id = obj['publicipid']
    return balancer
def _to_member(self, obj, port):
    """Convert a rule-instance dict into a Member on the given port."""
    address = obj['nic'][0]['ipaddress']
    return Member(id=obj['id'], ip=address, port=port)
| apache-2.0 |
ChrisHirsch/robotframework | src/robot/output/debugfile.py | 22 | 3702 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot import utils
from .logger import LOGGER
from .loggerhelper import IsLogged
def DebugFile(path):
    """Factory: return a _DebugFileWriter writing to `path`, or None.

    None is returned (with the reason logged) when no path is given or
    when the file cannot be opened for writing.
    """
    if not path:
        LOGGER.info('No debug file')
        return None
    try:
        outfile = open(path, 'w')
    except EnvironmentError as err:
        LOGGER.error("Opening debug file '%s' failed: %s"
                     % (path, err.strerror))
        return None
    LOGGER.info('Debug file: %s' % path)
    return _DebugFileWriter(outfile)
class _DebugFileWriter:
    """Streams suite/test/keyword progress into an open debug file.

    Output is plain text: separator rows mark structural boundaries,
    START/END markers show nesting via a growing '-' prefix, and log
    messages are written as timestamped lines.
    """
    _separators = {'SUITE': '=', 'TEST': '-', 'KW': '~'}
    _setup_or_teardown = ('setup', 'teardown')

    def __init__(self, outfile):
        self._outfile = outfile
        self._indent = 0
        self._kw_level = 0
        self._separator_written_last = False
        # Only messages at DEBUG level or above are written.
        self._is_logged = IsLogged('DEBUG')

    def start_suite(self, suite):
        self._separator('SUITE')
        self._start('SUITE', suite.longname)
        self._separator('SUITE')

    def end_suite(self, suite):
        self._separator('SUITE')
        self._end('SUITE', suite.longname, suite.elapsedtime)
        self._separator('SUITE')
        if self._indent == 0:
            # Top-level suite finished -> the debug file is complete.
            LOGGER.output_file('Debug', self._outfile.name)
            self.close()

    def start_test(self, test):
        self._separator('TEST')
        self._start('TEST', test.name)
        self._separator('TEST')

    def end_test(self, test):
        self._separator('TEST')
        self._end('TEST', test.name, test.elapsedtime)
        self._separator('TEST')

    def start_keyword(self, kw):
        # Only top-level keywords get a separator row.
        if not self._kw_level:
            self._separator('KW')
        self._start(self._get_kw_type(kw), kw.name, kw.args)
        self._kw_level += 1

    def end_keyword(self, kw):
        self._end(self._get_kw_type(kw), kw.name, kw.elapsedtime)
        self._kw_level -= 1

    def log_message(self, msg):
        if self._is_logged(msg.level):
            self._write(msg.message, level=msg.level, timestamp=msg.timestamp)

    def close(self):
        if not self._outfile.closed:
            self._outfile.close()

    def _get_kw_type(self, kw):
        # Setup/teardown keywords are labeled by their own type.
        return kw.type.upper() if kw.type in self._setup_or_teardown else 'KW'

    def _start(self, type_, name, args=''):
        args = ' ' + utils.seq2str2(args)
        self._write('+%s START %s: %s%s'
                    % ('-' * self._indent, type_, name, args))
        self._indent += 1

    def _end(self, type_, name, elapsed):
        self._indent -= 1
        self._write('+%s END %s: %s (%s)'
                    % ('-' * self._indent, type_, name, elapsed))

    def _separator(self, type_):
        self._write(self._separators[type_] * 78, separator=True)

    def _write(self, text, separator=False, level='INFO', timestamp=None):
        # Collapse consecutive separator rows into one.
        if separator and self._separator_written_last:
            return
        if not separator:
            stamp = timestamp or utils.get_timestamp()
            text = '%s - %s - %s' % (stamp, level, text)
        self._outfile.write(text.encode('UTF-8').rstrip() + '\n')
        self._outfile.flush()
        self._separator_written_last = separator
| apache-2.0 |
neuroidss/nupic.vision | nupicvision/regions/extra/GaborNode2.py | 8 | 141814 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import ctypes
import numpy
try:
# Not normally needed. Not available in demo app.
import hotshot
except:
pass
# Attempt to import OpenCV's ctypes-based bindings
try:
from opencv.cvtypes import cv
except:
cv = None
from StringIO import StringIO
from PIL import (Image,
ImageChops)
from nupic.regions.PyRegion import PyRegion, RealNumpyDType
from nupic.regions.Spec import *
# Global counter used for some debugging operations
id = 0
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# GaborNode
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
class GaborNode2(PyRegion):
"""
Performs dense Gabor filtering upon a multi-resolution grid.
"""
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Class constants
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# The minimum filter size dimension (3x3)
minFilterDim = 3
# The minimum filter size dimension (3x3)
minNumOrients = 0
# List of filter dimensions supported by the optimized
# C library
_optimizedFilterDims = [5, 7, 9, 11, 13]
# Valid parameter values
_validValues = {
'phaseMode': ('single', 'dual'),
'targetType': ('edge', 'line'),
'boundaryMode': ('constrained', 'sweepOff'),
'normalizationMethod': ('fixed', 'max', 'mean'),
'postProcessingMethod': ('raw', 'sigmoid', 'threshold'),
'nta_morphologyMethod': ('best', 'opencv', 'nta'),
}
# Default parameter values
_defaults = {
# Documented parameters:
'filterDim': 9,
'numOrientations': 4,
'phaseMode': 'single',
'centerSurround': False,
'targetType': 'edge',
'gainConstant': 1.0,
'normalizationMethod': 'fixed',
'perPlaneNormalization': False,
'perPhaseNormalization': True,
'postProcessingMethod': 'raw',
'postProcessingSlope': 1.0,
'postProcessingCenter': 0.5,
'postProcessingMin': 0.0,
'postProcessingMax': 1.0,
'zeroThresholdOut': 0.0,
'boundaryMode': 'constrained',
'offImagePixelValue': 0,
'suppressOutsideBox': True,
'forceBoxContraction': False,
'suppressByAlpha': False,
'logPrefix': None,
# Undocumented parameters:
'nta_aspectRatio': 0.3,
'nta_effectiveWidth': 4.5,
'nta_wavelength': 5.6,
'nta_lobeSuppression': True,
'nta_debugLogBuffers': False,
'nta_morphologyMethod': 'best',
}
# Our C implementation performs the 2D convolution using
# integer math, but scales the operands to preserve
# precision. The scaling is done by left shifting the Gabor
# filter coefficients by a fixed number of bits:
_integerMathShifts = 12 # 2^12 = 4096
_integerMathScale = 1 << _integerMathShifts
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Public API calls
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def __init__(self,
             # Filter size:
             filterDim=None,
             # Filter responses:
             numOrientations=None,
             phaseMode=None,
             centerSurround=None,
             targetType=None,
             # Normalization:
             gainConstant=None,
             normalizationMethod=None,
             perPlaneNormalization=None,
             perPhaseNormalization=None,
             # Post-processing:
             postProcessingMethod=None,
             postProcessingSlope=None,
             postProcessingCenter=None,
             postProcessingMin=None,
             postProcessingMax=None,
             zeroThresholdOut=None,
             # Bounding effects:
             boundaryMode=None,
             offImagePixelValue=None,
             suppressOutsideBox=None,
             forceBoxContraction=None,
             suppressByAlpha=None,
             # Logging
             logPrefix=None,
             # Additional keywords
             **keywds
             ):
    """
    @param filterDim -- The size (in pixels) of both the width and height of the
          gabor filters. Defaults to 9x9.
    @param numOrientations -- The number of gabor filter orientations to produce.
          The half-circle (180 degrees) of rotational angle will be evenly partitioned.
          Defaults to 4, which produces a gabor bank containing filters oriented
          at 0, 45, 90, and 135 degrees.
    @param phaseMode -- The number of separate phases to compute per orientation.
          Valid values are: 'single' or 'dual'. In 'single', responses to each such
          orientation are rectified by absolutizing them; i.e., a 90-degree edge
          will produce the same responses as a 270-degree edge, and the two
          responses will be indistinguishable. In "dual" mode, the responses to
          each orientation are rectified by clipping at zero, and then creating
          a second output response by inverting the raw response and again clipping
          at zero; i.e., a 90-degree edge will produce a response only in the
          90-degree-oriented plane, and a 270-degree edge will produce a response
          only the dual phase plane associated with the 90-degree plane (an
          implicit 270-degree plane.) Default is 'single'.
    @param centerSurround -- Controls whether an additional filter corresponding to
          a non-oriented "center surround" response is applied to the image.
          If phaseMode is "dual", then a second "center surround" response plane
          is added as well (the inverted version of the center-surround response.)
          Defaults to False.
    @param targetType -- The preferred "target" of the gabor filters. A value of
          'line' specifies that line detectors (peaks in the center and troughs
          on either side) are to be used. A value of 'edge' specifies that edge
          detectors (with a peak on one side and a trough on the other) are to
          be used. Default is 'edge'.
    @param gainConstant -- A multiplicative amplifier that is applied to the gabor
          responses after any normalization. Defaults to 1.0; larger values
          increase the sensitivity to edges.
    @param normalizationMethod -- Controls the method by which responses are
          normalized on a per image (and per scale) basis. Accepts the following
          three legal values:
          "fixed": No response normalization;
          "max": Applies a global gain value to the responses so that the
                 max response equals the value of 'gainConstant'
          "mean": Applies a global gain value to the responses so that the
                 mean response equals the value of 'gainConstant'
          Default is 'fixed'.
    @param perPlaneNormalization -- Controls whether normalization (as specified by
          'normalizationMethod') is applied globally across all response planes
          (for a given scale), or individually to each response plane. Default
          is False. Note: this parameter is ignored if normalizationMethod is "fixed".
    @param perPhaseNormalization -- Controls whether normalization (as specified by
          'normalizationMethod') is applied globally across both phases for a
          particular response orientation and scale, or individually to each
          phase of the response. Default is True. Note: this parameter is
          ignored if normalizationMethod is "fixed".
    @param postProcessingMethod -- Controls what type of post-processing (if any)
          is to be performed on the normalized responses. Valid value are:
          "raw": No post-processing is performed; final output values are
                 unmodified after normalization
          "sigmoid": Passes normalized output values through a sigmoid function
                 parameterized by 'postProcessingSlope' and 'postProcessingCenter'.
          "threshold": Passes normalized output values through a piecewise linear
                 thresholding function parameterized by 'postProcessingMin'
                 and 'postProcessingMax'.
    @param postProcessingSlope -- Controls the slope (steepness) of the sigmoid
          function used when 'postProcessingMethod' is set to 'sigmoid'.
    @param postProcessingCenter -- Controls the center point of the sigmoid function
          used when 'postProcessingMethod' is set to 'sigmoid'.
    @param postProcessingMin -- If 'postProcessingMethod' is set to 'threshold', all
          normalized response values less than 'postProcessingMin' are suppressed to zero.
    @param postProcessingMax -- If 'postProcessingMethod' is set to 'threshold', all
          normalized response values greater than 'postProcessingMax' are clamped to one.
    @param zeroThresholdOut -- if all outputs of a gabor node are below this threshold,
          they will all be driven to absolute 0. This is useful in conjunction with
          using the product mode/don't care spatial pooler which needs to know when
          an input should be treated as 0 vs being normalized to sum to 1.
    @param boundaryMode -- Controls how GaborNode deals with boundary effects. Accepts
          two valid parameters:
          'constrained' -- Gabor responses are normally only computed for image locations
                 that are far enough from the edge of the input image so that the entire
                 filter mask fits within the input image. Thus, the spatial dimensions of
                 the output gabor maps will be smaller than the input image layers.
          'sweepOff' -- Gabor responses will be generated at every location within
                 the input image layer. Thus, the spatial dimensions of the output gabor
                 maps will be identical to the spatial dimensions of the input image.
                 For input image locations that are near the edge (i.e., a portion of
                 the gabor filter extends off the edge of the input image), the values
                 of pixels that are off the edge of the image are taken to be as specifed
                 by the parameter 'offImagePixelValue'.
          Default is 'constrained'.
    @param offImagePixelValue -- If 'boundaryMode' is set to 'sweepOff', then this
          parameter specifies the value of the input pixel to use for "filling"
          enough image locations outside the bounds of the original image.
          Ignored if 'boundaryMode' is 'constrained'. Default value is 0.
    @param suppressOutsideBox -- If True, then gabor responses outside of the bounding
          box (provided from the sensor) are suppressed. Internally, the bounding
          box is actually expanded by half the filter dimension (respecting the edge
          of the image, of course) so that responses can be computed for all image
          locations within the original bounding box.
    @param forceBoxContraction -- Fine-tunes the behavior of bounding box suppression.
          If False (the default), then the bounding box will only be 'contracted'
          (by the half-width of the filter) in the dimenion(s) in which it is not
          the entire span of the image. If True, then the bounding box will be
          contracted unconditionally.
    @param suppressByAlpha -- A boolean that, if True, instructs GaborNode to use
          the pixel-accurate alpha mask received on the input 'validAlphaIn' for
          the purpose of suppression of responses.
    @param logPrefix -- If non-None, causes the response planes at each scale, and
          for each input image, to be written to disk using the specified prefix
          for the name of the log images. Default is None (no such logging.)
    """
    #+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
    #| The following parameters are for advanced configuration and unsupported at this time  |
    #| They may be specified via keyword arguments only.                                     |
    #+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
    #
    # @param nta_aspectRatio -- Controls how "fat" (i.e., how oriented) the Gabor
    #          filters are. A value of 1 would produce completely non-oriented
    #          (circular) filters; smaller values will produce a more oriented
    #          filter. Default is 0.3.
    #
    # @param nta_effectiveWidth -- Controls the rate of exponential drop-off in
    #          the Gaussian component of the Gabor filter. Default is 4.5.
    #
    # @param nta_wavelength -- Controls the frequency of the sinusoidal component
    #          of the Gabor filter. Default is 5.6.
    #
    # @param nta_lobeSuppression -- Controls whether or not the secondary lobes of the
    #          Gabor filters are suppressed. The suppression is performed based
    #          on the radial distance from the oriented edge to which the Gabor
    #          filter is tuned. If True, then the secondary lobes produced
    #          by the pure mathematical Gabor equation will be suppressed
    #          and have no effect; if False, then the pure mathematical
    #          Gabor equation (digitized into discrete sampling points, of
    #          course) will be used. Default is True.
    #
    # @param nta_debugLogBuffers -- If enabled, causes internal memory buffers used
    #          C implementation to be dumped to disk after each compute()
    #          cycle as an aid in the debugging of the C code path.
    #
    # @param nta_morphologyMethod -- Controls the method to use for performing
    #          morphological operations (erode or dilate) upon the
    #          valid alpha masks. Legal values are: 'opencv' (use the
    #          faster OpenCV routines), 'nta' (use the slower routines,
    #          or 'best' (use OpenCV if it is available on the platform,
    #          otherwise use the slower routines.)
    #
    # ------------------------------------------------------
    # Handle hidden/undocumented parameters
    # NOTE: exec()/eval() are used throughout __init__ to treat the long
    # keyword-parameter list as a table keyed by parameter name; the local
    # variables named by self._defaults keys are created/read dynamically.
    for paramName in [p for p in self._defaults if self._isHiddenParam(p)]:
        exec("%s = keywds.pop('%s', None)" % (paramName, paramName))

    # ------------------------------------------------------
    # Assign default values to missing parameters
    for paramName, paramValue in self._defaults.items():
        if eval(paramName) is None:
            exec("%s = paramValue" % paramName)

    # ------------------------------------------------------
    # Handle deprecated parameters
    # NOTE(review): defaults were already filled in by the loop above, so
    # the "is None" fallback branches below can apparently never be taken;
    # the deprecated values are then only compared for consistency and
    # otherwise ignored -- verify this is the intended behavior.

    # Deprecated: numOrients
    numOrients = keywds.pop('numOrients', None)
    if numOrients:
        print "WARNING: 'numOrients' has been deprecated and replaced with 'numOrientations'"
        if numOrientations is None:
            numOrientations = numOrients
        elif numOrients != numOrientations:
            print "WARNING: 'numOrients' (%s) is inconsistent with 'numOrientations' (%s) and will be ignored" % \
                  (str(numOrients), str(numOrientations))

    # Deprecated: filterPhase
    filterPhase = keywds.pop('filterPhase', None)
    if filterPhase:
        print "WARNING: 'filterPhase' has been deprecated and replaced with 'targetType'"
        if targetType is None:
            targetType = filterPhase
        elif filterPhase != targetType:
            print "WARNING: 'filterPhase' (%s) is inconsistent with 'targetType' (%s) and will be ignored" % \
                  (str(filterPhase), str(targetType))

    # Deprecated: nta_edgeMode
    # NOTE(review): 'edgeMode' is never defined in this scope, so passing
    # 'nta_edgeMode' raises NameError below; it presumably should map onto
    # 'boundaryMode' (see getParameter, which aliases the two) -- TODO
    # confirm and fix.
    nta_edgeMode = keywds.pop('nta_edgeMode', None)
    if nta_edgeMode:
        print "WARNING: 'nta_edgeMode' has been deprecated and replaced with 'edgeMode'"
        if edgeMode is None:
            edgeMode = nta_edgeMode
        elif nta_edgeMode != edgeMode:
            print "WARNING: 'nta_edgeMode' (%s) is inconsistent with 'edgeMode' (%s) and will be ignored" % \
                  (str(nta_edgeMode), str(edgeMode))

    # Deprecated: lateralInhibition
    lateralInhibition = keywds.pop('nta_lateralInhibition', None)
    if lateralInhibition:
        print "WARNING: 'lateralInhibition' has been deprecated and will not be supported in future releases"

    # Deprecated: validityShrinkage
    validityShrinkage = keywds.pop('validityShrinkage', None)
    if validityShrinkage:
        print "WARNING: 'validityShrinkage' has been deprecated and replaced with 'suppressOutsideBox'"
        if suppressOutsideBox is None:
            suppressOutsideBox = (validityShrinkage >= 0.0)
        elif suppressOutsideBox != (validityShrinkage >= 0.0):
            print "WARNING: 'validityShrinkage' (%s) is inconsistent with 'suppressOutsideBox' (%s) and will be ignored" % \
                  (str(validityShrinkage), str(suppressOutsideBox))

    # Multi-resolution / topology state (populated later by initialize()).
    self._numScales = None
    self.nta_phaseIndex = 0
    self._inputPyramidTopology = None
    self._outputPyramidTopology = None
    self._topDownCombiner = None
    self._tdNumParents = None
    self._enabledNodes = []
    self._nodesWithReceptiveField = None

    # These are cached inputs/outputs used for detecting/skipping either the
    # bottom up or top down compute to improve performance.
    self._cachedRFInput = None
    self._cachedBUInput = None
    self._cachedBUOutput = None
    self._cachedTDInput = None
    self._cachedTDOutput = None
    self._cachedResetIn = None
    self._cachedValidRegionIn = None
    self._cachedValidRegionOut = None

    # Profiling information
    self._profileObj = None
    self._iterations = 0

    # No longer needed for receptivefields_test, but still needed to satisfy
    # an assertion in _checkEphemeralMembers
    if not hasattr(self, "_inputSplitter"):
        self._inputSplitter = None

    # Receptive-field bookkeeping (filled in elsewhere).
    self._rfMask = None
    self._rfSize = None
    self._rfInvLenY = None
    self._rfCenterX = None
    self._rfCenterY = None
    self._rfMinX = None
    self._rfMinY = None
    self._rfInvLenX = None
    self._rfMaxX = None
    self._rfMaxY = None

    self._initEphemerals()

    # ------------------------------------------------------
    # Validate each parameter
    for paramName in self._defaults.keys():
        self._validate(paramName, eval(paramName))

    # ------------------------------------------------------
    # Store each parameter value
    for paramName in self._defaults.keys():
        # Hidden parameters have the 'nta_' prefix stripped
        #if self._isHiddenParam(paramName):
        #  internalName = paramName[4:]
        #else:
        #  internalName = paramName
        internalName = self._stripHidingPrefixIfPresent(paramName)
        exec("self._%s = %s" % (internalName, paramName))

    # ------------------------------------------------------
    # Perform additional validations that operate on
    # combinations/interactions of parameters
    self._doHolisticValidation()

    # ------------------------------------------------------
    # Set up internal state

    # This node always get its input as a padded image cube from the ImageSensor
    # It may change in the future when ImageSensor supports packed image pyramids
    self._gaborBank = None

    # Generation of response images must be explicitly enabled
    self.disableResponseImages()

    # This node type is non-learning, and thus begins life in 'infer' mode.
    # This is only needed because our base class requires it.
    self._stage = 'infer'

    # We are always connected to an image sensor with padded pixels
    self._inputPyramidFormat = 'padded'

    # Store the number of output planes we'll produce
    self._numPlanes = self.getNumPlanes()

    # Initially, we do not generate response images
    self._makeResponseImages = False

    # Where we keep the maxTopDownOut for every node
    self._maxTopDownOut = []
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _stripHidingPrefixIfPresent(self, paramName):
    """Return paramName without its leading 'nta_' prefix when the
    parameter is hidden; otherwise return it unchanged."""
    return paramName[4:] if self._isHiddenParam(paramName) else paramName
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _isHiddenParam(self, paramName):
    """Return True if 'paramName' names a hidden parameter, which is
    marked by a leading 'nta_' prefix."""
    return paramName.startswith('nta_')
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def getOutputDims(self, inputDims):
    """Instance-level convenience wrapper around calcOutputDims(),
    using this node's configured filter size and boundary mode."""
    return self.calcOutputDims(inputDims, self._filterDim,
                               self._boundaryMode)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def getNumPlanes(self):
    """Instance-level convenience wrapper around calcNumPlanes(), using
    this node's configured orientations, phase mode, and center-surround
    setting."""
    return self.calcNumPlanes(self._numOrientations, self._phaseMode,
                              self._centerSurround)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def calcOutputDims(cls, inputDims,
                   filterDim,
                   boundaryMode,
                   **keywds):
    """
    Public utility method that computes the output dimensions
    in form (height, width), given 'inputDims' (height, width),
    for a particular 'filterDim'.

    @param inputDims -- (height, width) of the input layer.
    @param filterDim -- odd filter size in pixels; None selects the
          class default.
    @param boundaryMode -- 'constrained' or 'sweepOff'; None selects
          the class default.
    @returns tuple of output (height, width).
    Raises RuntimeError on invalid parameter values.
    """
    # Fall back to the class-level defaults for unspecified parameters.
    # (Replaces the previous exec()/eval()-based assignment of local
    # names, which was fragile and hard to read.)
    if filterDim is None:
        filterDim = cls._defaults['filterDim']
    if boundaryMode is None:
        boundaryMode = cls._defaults['boundaryMode']
    # Validation (raises RuntimeError on bad values)
    cls._validate('filterDim', filterDim)
    cls._validate('boundaryMode', boundaryMode)
    # 'sweepOff' pads the image so output dims match input dims;
    # 'constrained' shrinks each dimension by (filterDim - 1).
    if boundaryMode == 'sweepOff':
        shrinkage = 0
    else:  # 'constrained' -- the only other value _validate accepts
        shrinkage = filterDim - 1
    return tuple(dim - shrinkage for dim in inputDims)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def calcNumPlanes(cls, numOrientations=None,
                  phaseMode=None,
                  centerSurround=None,
                  **keywds):
    """
    Public utility method that computes the number of response planes
    for a particular Gabor configuration.

    @param numOrientations -- number of oriented filters; None selects
          the class default.
    @param phaseMode -- 'single' or 'dual'; None selects the class default.
    @param centerSurround -- whether a center-surround plane is added;
          None selects the class default.
    @returns the total number of response planes.
    Raises RuntimeError on invalid parameter values.
    """
    # Fall back to the class-level defaults for unspecified parameters.
    # (Replaces the previous exec()/eval()-based assignment of local
    # names, which was fragile and hard to read.)
    if numOrientations is None:
        numOrientations = cls._defaults['numOrientations']
    if phaseMode is None:
        phaseMode = cls._defaults['phaseMode']
    if centerSurround is None:
        centerSurround = cls._defaults['centerSurround']
    # Validation (raises RuntimeError on bad values)
    cls._validate('phaseMode', phaseMode)
    cls._validate('numOrientations', numOrientations)
    cls._validate('centerSurround', centerSurround)
    # One plane per orientation, plus an optional center-surround plane;
    # 'dual' phase mode doubles everything.
    numPlanes = numOrientations
    if centerSurround:
        numPlanes += 1
    if phaseMode == 'dual':
        numPlanes *= 2
    return numPlanes
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _doHolisticValidation(self):
    """
    Validate interactions between parameters that cannot be checked
    one parameter at a time.
    """
    # At least one response plane must be configured.
    if self.getNumPlanes() >= 1:
        return
    raise RuntimeError("Configuration error: no response planes; "
                       "either 'numOrientations' must be > 0 or "
                       "'centerSurround' must be True")
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def _validate(cls, name, value):
    """
    Validate a single parameter value.

    @param name -- parameter name, as defined by the Node Spec.
    @param value -- the proposed value for that parameter.

    Raises a RuntimeError if the parameter is invalid or unknown.
    """
    # NOTE: several messages below previously used a bare '%' conversion
    # ("...must be one of %; your value: %s"), which made the '%' operator
    # raise "ValueError: unsupported format character" instead of the
    # intended RuntimeError.  Those have been fixed to '%s'.

    # ------------------------------------------------------
    # Filter size:

    # Validation: filterDim
    if name == "filterDim":
        if type(value) != type(0) or \
           value < cls.minFilterDim or \
           value % 2 != 1:
            raise RuntimeError("Value error: '%s' must be an odd integer >= %d; your value: %s" % \
                               (name, cls.minFilterDim, str(value)))

    # ------------------------------------------------------
    # Filter responses:

    # Validation: numOrientations
    elif name == "numOrientations":
        if type(value) != type(0) or \
           value < cls.minNumOrients:
            raise RuntimeError("Value error: '%s' must be an integer >= %d; your value: %s" % \
                               (name, cls.minNumOrients, str(value)))

    # Validation: phaseMode
    elif name == "phaseMode":
        if value not in cls._validValues[name]:
            raise RuntimeError("Value error: '%s' must be one of %s; your value: %s" % \
                               (name, str(cls._validValues[name]), value))

    # Validation: centerSurround
    elif name == "centerSurround":
        if value not in [True, False]:
            raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
                               (name, str(value)))

    # Validation: targetType
    elif name == "targetType":
        if value not in cls._validValues[name]:
            raise RuntimeError("Value error: '%s' must be one of %s; your value: %s" % \
                               (name, str(cls._validValues[name]), value))

    # ------------------------------------------------------
    # Normalization:

    # Validation: gainConstant
    elif name == "gainConstant":
        if type(value) not in [type(0), type(0.0)] or float(value) < 0.0:
            raise RuntimeError("Value error: '%s' must be a float or integer >= 0.0; your value: %s" % \
                               (name, str(value)))

    # Validation: normalizationMethod
    elif name == "normalizationMethod":
        if value not in cls._validValues[name]:
            raise RuntimeError("Value error: '%s' must be one of %s; your value: %s" % \
                               (name, str(cls._validValues[name]), value))

    # Validation: perPlaneNormalization
    elif name == "perPlaneNormalization":
        if value not in [True, False]:
            raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
                               (name, str(value)))

    # Validation: perPhaseNormalization
    elif name == "perPhaseNormalization":
        if value not in [True, False]:
            raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
                               (name, str(value)))

    # ------------------------------------------------------
    # Post-processing:

    # Validation: postProcessingMethod
    elif name == "postProcessingMethod":
        if value not in cls._validValues[name]:
            raise RuntimeError("Value error: '%s' must be one of %s; your value: %s" % \
                               (name, str(cls._validValues[name]), value))

    # Validation: postProcessingSlope
    elif name == "postProcessingSlope":
        if type(value) not in [type(0), type(0.0)] or float(value) <= 0.0:
            raise RuntimeError("Value error: '%s' must be a float or integer > 0.0; your value: %s" % \
                               (name, str(value)))

    # Validation: postProcessingCenter
    elif name == "postProcessingCenter":
        if type(value) not in [type(0), type(0.0)]:
            raise RuntimeError("Value error: '%s' must be a float or integer; your value: %s" % \
                               (name, str(value)))

    # Validation: postProcessingMin
    elif name == "postProcessingMin":
        if type(value) not in [type(0), type(0.0)]:
            raise RuntimeError("Value error: '%s' must be a float or integer; your value: %s" % \
                               (name, str(value)))

    # Validation: postProcessingMax
    elif name == "postProcessingMax":
        if type(value) not in [type(0), type(0.0)]:
            raise RuntimeError("Value error: '%s' must be a float or integer; your value: %s" % \
                               (name, str(value)))

    # Validation: zeroThresholdOut
    elif name == "zeroThresholdOut":
        if type(value) not in [type(0), type(0.0)]:
            raise RuntimeError("Value error: '%s' must be a float or integer >= 0.0; your value: %s" % \
                               (name, str(value)))

    # ------------------------------------------------------
    # Boundary effects:

    # Validation: boundaryMode
    elif name == "boundaryMode":
        if value not in cls._validValues[name]:
            raise RuntimeError("Value error: '%s' must be one of %s; your value: %s" % \
                               (name, str(cls._validValues[name]), str(value)))

    # Validation: offImagePixelValue
    elif name == "offImagePixelValue":
        if value != 'colorKey' and (type(value) not in (int, float) or float(value) < 0.0 or float(value) > 255.0):
            raise RuntimeError("Value error: '%s' must be a float or integer between 0 and 255, or 'colorKey'; your value: %s" % \
                               (name, str(value)))

    # Validation: suppressOutsideBox
    elif name == "suppressOutsideBox":
        if value not in [True, False]:
            raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
                               (name, str(value)))

    # Validation: forceBoxContraction
    elif name == "forceBoxContraction":
        if value not in [True, False]:
            raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
                               (name, str(value)))

    # Validation: suppressByAlpha
    elif name == "suppressByAlpha":
        if value not in [True, False]:
            raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
                               (name, str(value)))

    # ------------------------------------------------------
    # Logging

    # Validation: logPrefix
    elif name == "logPrefix":
        if value is not None and (type(value) != type("") or len(value) == 0):
            raise RuntimeError("Value error: '%s' must be a string; your value: %s" % \
                               (name, str(value)))

    # ------------------------------------------------------
    # Undocumented parameters:

    # Validation: aspectRatio
    elif name == "nta_aspectRatio":
        if type(value) not in [type(0), type(0.)] or value <= 0.0:
            raise RuntimeError("Value error: '%s' must be a float > 0.0; your value: %s" % \
                               (name, str(value)))

    # Validation: effectiveWidth
    elif name == "nta_effectiveWidth":
        if type(value) not in [type(0), type(0.)] or value <= 0.0:
            raise RuntimeError("Value error: '%s' must be a float > 0.0; your value: %s" % \
                               (name, str(value)))

    # Validation: wavelength
    elif name == "nta_wavelength":
        if type(value) not in [type(0), type(0.)] or value <= 0.0:
            raise RuntimeError("Value error: '%s' must be a float > 0.0; your value: %s" % \
                               (name, str(value)))

    # Validation: lobeSuppression
    elif name == "nta_lobeSuppression":
        if value not in [True, False]:
            raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
                               (name, str(value)))

    # Validation: debugLogBuffers
    elif name == "nta_debugLogBuffers":
        if value not in [True, False]:
            raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
                               (name, str(value)))

    # Validation: morphologyMethod
    elif name == "nta_morphologyMethod":
        if value not in cls._validValues[name]:
            raise RuntimeError("Value error: '%s' must be one of %s; your value: %s" % \
                               (name, str(cls._validValues[name]), str(value)))
        elif value == "opencv" and cv is None:
            raise RuntimeError(
                "'%s' was explicitly specified as 'opencv' " \
                "but OpenCV is not available on this platform" % name)

    # ------------------------------------------------------
    # Deprecated parameters:

    # Validation: numOrients
    elif name == "numOrients":
        if type(value) != type(0) or \
           value < cls.minNumOrients:
            raise RuntimeError("Value error: '%s' must be an integer >= %d; your value: %s" % \
                               (name, cls.minNumOrients, str(value)))

    # Validation: lateralInhibition
    elif name == "lateralInhibition":
        if type(value) not in [type(0), type(0.0)] or value < 0.0 or value > 1.0:
            raise RuntimeError("Value error: '%s' must be a float >= 0 and <= 1; your value: %s" % \
                               (name, str(value)))

    # Validation: validityShrinkage
    elif name == "validityShrinkage":
        if type(value) not in [type(0), type(0.0)] or float(value) < 0.0 or float(value) > 1.0:
            raise RuntimeError("Value error: '%s' must be a float or integer between 0 and 1; your value: %s" % \
                               (name, str(value)))

    # ------------------------------------------------------
    # Unknown parameter
    else:
        raise RuntimeError("Unknown parameter: %s [%s]" % (name, value))
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def initialize(self, dims, splitterMaps):
"""Build the gaborfilter bank.
This method is called after construction.
"""
# Preparations (creation of buffer, etc.)
# Send the dims as a tuple that contains one pair. This needed to make
# the node treat its input as a single scale.
self._prepare((dims,))
# Determine the number of response planes
self._numPlanes = self.getNumPlanes()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def getParameter(self, parameterName, nodeSet=""):
"""
Get the value of an PyMultiNode parameter.
@param parameterName -- the name of the parameter to retrieve, as defined
by the Node Spec.
"""
if parameterName in self._defaults:
# Hidden "nta_" parameters are internally stored as
# class attributes without the leading "nta"
if parameterName.startswith("nta_"):
parameterName = parameterName[4:]
return eval("self._%s" % parameterName)
# Handle standard MRG infrastructure
elif parameterName == 'nta_width':
return self._inputPyramidTopology[0]['numNodes'][0]
elif parameterName == 'nta_height':
return self._inputPyramidTopology[0]['numNodes'][1]
# Handle the maxTopDownOut read-only parameter
elif parameterName == 'maxTopDownOut':
return self._maxTopDownOut
# Handle deprecated parameters
elif parameterName == 'numOrients':
return self._numPlanes
elif parameterName == 'filterPhase':
return self._targetType
elif parameterName == 'nta_edgeMode':
return self._boundaryMode
elif parameterName == 'nta_lateralInhibition':
return 0.0
# Unknown parameter (at least by GaborNode)
else:
return PyRegion.getParameter(self, parameterName, nodeSet)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def setParameter(self, parameterName, parameterValue, nodeSet=""):
"""
Set the value of an PyRegion parameter.
@param parameterName -- the name of the parameter to update, as defined
by the Node Spec.
@param parameterValue -- the value to which the parameter is to be set.
"""
# @todo -- Need to add validation of parameter changes
settableParams = ["suppressOutsideBox", "forceBoxContraction",
"suppressByAlpha", "offImagePixelValue",
"perPlaneNormalization", "perPhaseNormalization",
"nta_debugLogBuffers", "logPrefix",
"zeroThresholdOut"]
regenParams = ["gainConstant", "normalizationMethod",
"postProcessingMethod", "postProcessingSlope",
"postProcessingCenter", "postProcessingMin",
"postProcessingMax"]
if parameterName in settableParams + regenParams:
exec("self._%s = parameterValue" % parameterName)
elif parameterName == 'nta_morphologyMethod':
self._morphologyMethod = parameterValue
# Not one of our parameters
else:
return PyRegion.setParameter(self, parameterName, parameterValue, nodeSet)
# Generate post-processing lookup-tables (LUTs) that will be
# used by the C implementation
if parameterName in regenParams:
self._makeLUTs()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def enableResponseImages(self):
"""
Enable the generation of PIL Images representing the Gabor reponses.
"""
self._makeResponseImages = True
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def disableResponseImages(self):
"""
Disable the generation of PIL Images representing the Gabor reponses.
"""
self._makeResponseImages = False
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def getResponseImages(self, whichResponse='all',
preSuppression=False,
whichScale='all',
whichPhase=0,
whichDirection='bottomUp'):
"""
Return a list of PIL Images representing the Gabor responses
computed upon the latest multi-resolution input image pyramid.
@param whichResponse -- Indicates which Gabor orientation response
should be returned. If 'all' (the default), then false
color composite images will be generated that contains the
gabor responses for all orientations. Otherwise, it should
be an integer index between 0 and numOrients-1, in which
case grayscale images will be generated.
@param preSuppression -- Indicates whether the images should be
generated before bounding box suppression is performed
(if True), or after suppression (if False, the default.)
@param whichScale -- Indicates which multi-resolution scale
should be used to generate the response Images. If 'all'
(the default), then images will be generated for each
scale in the input multi-resolution grid, and will be
returned in a list. Otherwise, it should be an integer
index between 0 and numResolutions-1 (the number of
layers in the multi-resolution grid), in which case a
single Image will be returned (not a list).
@param whichDirection -- Indicates which phase of resonse images should
be returned ('bottomUp', 'topDown', 'combined'). 'bottomUp'
gets the unaltered bottom-up responses, 'top-down' gets the
top-down feedback responses, and 'combined'
@returns -- Either a single PIL Image, or a list of PIL Images
that correspond to different resolutions.
"""
# Make sure response images were enabled
if not self._makeResponseImages:
# Need to generate images now
if whichDirection == 'bottomUp':
if self.response is None:
return
response = self.response
elif whichDirection == 'topDown':
if self.tdInput is None:
return
response = self.tdInput
elif whichDirection == 'combined':
if self.selectedBottomUpOut:
return
response = self.selectedBottomUpOut
if response is None:
# No response to use
return
self._genResponseImages(response, preSuppression=preSuppression, phase=whichDirection)
# Make sure we have images to provide
if self._responseImages is None:
return
# Pull subset of images based on 'preSuppression' setting
imageSet = self._responseImages.get(self._getResponseKey(preSuppression))
# Validate format of 'whichScale' arg
numScales = len(self._inputPyramidTopology)
if whichScale != 'all' and (type(whichScale) != type(0) or whichScale < 0 or whichScale >= numScales):
raise RuntimeError, \
"'whichScale' must be 'all' or an integer between 0 and %d." % self._numScales
# Validate format of 'whichResponse' arg
if whichResponse not in ['all', 'centerSurround']:
if type(whichResponse) != type(0) or whichResponse < 0 or whichResponse >= self._numPlanes:
raise RuntimeError, \
"'whichResponse' must be 'all' or an integer between 0 and %d." % self._numPlanes
# Make sure the requested phase of response exists
if not imageSet.has_key(whichDirection):
return
# Handle "exotic" responses
if whichResponse != 'all':
if whichResponse == 'centerSurround':
whichResponse = self._numOrientations
assert type(whichResponse) == type(0)
if whichPhase > 0:
whichResponse += self._numOrientations
if self._centerSurround:
whichResponse += 1
# Return composite gabor response(s)
return imageSet[whichDirection][whichResponse][whichScale]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Public class methods
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def deserializeImage(cls, serialized):
"""
Helper function that training/testing scripts can invoke in order
to deserialize debugging images provided by the getResponseImages()
method.
"""
image = Image.open(StringIO(serialized))
image.load()
return image
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Private methods - Overriding base class
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  class ARRAY(ctypes.Structure):
    # Lightweight ctypes mirror of a numpy array header, used by
    # _wrapArray() to hand array metadata across the C boundary.
    # Field order must match what the C library expects -- do not reorder.
    _fields_ = [
        ("nd", ctypes.c_int),             # number of dimensions (rank)
        ("dimensions", ctypes.c_void_p),  # pointer to the shape array
        ("strides", ctypes.c_void_p),     # pointer to the strides array
        ("data", ctypes.c_void_p),        # pointer to the raw element data
      ]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _wrapArray(self, array):
"""
Helper function that takes a numpy array and returns
a 4-tuple consisting of ctypes references to the
following:
(nd, dimensions, strides, data)
"""
if array is None:
return None
else:
return ctypes.byref(self.ARRAY(len(array.ctypes.shape),
ctypes.cast(array.ctypes.shape, ctypes.c_void_p),
ctypes.cast(array.ctypes.strides, ctypes.c_void_p),
array.ctypes.data))
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _prepare(self, inputDims):
"""
Perform one-time preparations need for gabor processing.
"""
#inputDims = [(inputDim['numNodes'][1], inputDim['numNodes'][0]) \
# for inputDim in self._inputPyramidTopology]
self.prepare(inputDims)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def prepare(self, inputDims):
    """
    Perform one-time preparations need for gabor processing.
    Public interface allowing the GaborNode to be tested
    outside of the full RTE.
    @param inputDims: a list of input image sizes in the
          form of 2-tuples (width, height)
    """
    # NOTE(review): the docstring says (width, height) tuples, but the
    # unpacking below (self._inHeight, self._inWidth = ...) treats each
    # tuple as (height, width) -- confirm which orientation callers use.
    self._numScales = len(inputDims)
    self._inputDims = inputDims
    # Compute output dims for each input dim
    self._outputDims = [self.getOutputDims(inputDim) for inputDim in inputDims]
    # Compute the minimum input/output dimension across all scales
    self._minInputDim = min([min(inputDim) for inputDim in self._inputDims])
    self._minOutputDim = min([min(outputDim) for outputDim in self._outputDims])
    # Break out the first scale's dims as floats for convenience
    self._inHeight, self._inWidth = [float(x) for x in self._inputDims[0]]
    self._outHeight, self._outWidth = [float(x) for x in self._outputDims[0]]
    # Load the _gaborNode C library
    libGabor = self._loadLibrary("_algorithms")
    # Prepare the C calls
    if libGabor:
      self._gaborComputeProc = libGabor.gaborCompute
    else:
      raise Exception('Unable to load gaborNode C library _algorithms')
    # If we could not load the library, then we'll default to
    # using numpy for our gabor processing.
    # NOTE(review): this assignment unconditionally overwrites the proc
    # that was just loaded above, forcing the numpy fallback path even
    # when the C library loaded successfully.  Looks like either a bug or
    # a deliberate disable of the C path -- confirm before changing.
    self._gaborComputeProc = None
    # Prepare some data structures in advance
    # Allocate working buffers to be used by the C implementation
    #self._buffers = [numpy.zeros(inputDim, dtype=numpy.int32) for inputDim in inputDims]
    self._allocBuffers()
    # Generate post-processing lookup-tables (LUTs) that will be
    # used by the C implementation
    self._makeLUTs()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _alignToFour(self, val):
"""
Utility macro that increases a value 'val' to ensure
that it is evenly divisible by four (e.g., for
purposes of memory alignment, etc.)
"""
return (((val - 1) / 4) + 1) * 4
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _makeLUTs(self):
"""
Generate post-processing lookup-tables (LUTs) that will be
used by the C implementation
"""
# --------------------------------------------------
# Define LUT parameters
# For 'normalizationMethod' of 'mean', this internal parameter
# controls the trade-off between how finely we can discretize our
# LUT bins vs. how often a raw response value "overflows" the
# maximum LUT bin and has to be clamped. In essence, any raw
# response value greater than 'meanLutCushionFactor' times the
# mean response for the image will "overflow" and be clamped
# to the response value of the largest bin in the LUT.
meanLutCushionFactor = 4.0
# We'll use a LUT large enough to give us decent precision
# but not so large that it causes cache problems.
# A total of 1024 bins seems reasonable:
numLutShifts = 10
numLutBins = (1 << numLutShifts)
# --------------------------------------------------
# Build LUT
# Build our Gabor Bank if it doesn't already exist
self._buildGaborBankIfNeeded()
# Empirically compute the maximum possible response value
# given our current parameter settings. We do this by
# generating a fake image of size (filterDim X filterDim)
# that has a pure vertical edge and then convolving it with
# the first gabor filter (which is always vertically oriented)
# and measuring the response.
testImage = numpy.ones((self._filterDim, self._filterDim), dtype=numpy.float32) * 255.0
#testImage[:, :(self._filterDim/2)] = 0
testImage[numpy.where(self._gaborBank[0] < 0.0)] *= -1.0
maxRawResponse = (testImage * self._gaborBank[0]).sum()
# At run time our Gabor responses will be scaled (via
# bit shifting) so that we can do integer match instead of
# floating point match, but still have high precision.
# So we'll simulate that in order to get a comparable result.
maxShiftedResponse = maxRawResponse / (255.0 * float(self._integerMathScale))
# Depending on our normalization method, our LUT will have a
# different scaling factor (for pre-scaling values prior
# to discretizing them into LUT bins)
if self._normalizationMethod == 'fixed':
postProcScalar = float(numLutBins - 1) / maxShiftedResponse
elif self._normalizationMethod == 'max':
postProcScalar = float(numLutBins - 1)
elif self._normalizationMethod == 'mean':
postProcScalar = float(numLutBins - 1) / meanLutCushionFactor
else:
assert False
# Build LUT
lutInputs = numpy.array(range(numLutBins), dtype=numpy.float32) / postProcScalar
# Sigmoid: output = 1 / (1 + exp(input))
if self._postProcessingMethod == 'sigmoid':
offset = 1.0 / (1.0 + numpy.exp(self._postProcessingSlope * self._postProcessingCenter))
scaleFactor = 1.0 / (1.0 - offset)
postProcLUT = ((1.0 / (numpy.exp(numpy.clip(self._postProcessingSlope \
* (self._postProcessingCenter - lutInputs), \
-40.0, 40.0)) + 1.0)) - offset) * scaleFactor
# For some parameter choices, it is possible that numerical precision
# issues will result in the 'offset' being ever so slightly larger
# than the value of postProcLUT[0]. This will result in a very
# tiny negative value in the postProcLUT[0] slot, which is
# undesireable because the output of a sigmoid should always
# be bound between (0.0, 1.0).
# So we clip the LUT values to this range just to keep
# things clean.
postProcLUT = numpy.clip(postProcLUT, 0.0, 1.0)
# Threshold: Need piecewise linear LUT
elif self._postProcessingMethod == "threshold":
postProcLUT = lutInputs
postProcLUT[lutInputs < self._postProcessingMin] = 0.0
postProcLUT[lutInputs > self._postProcessingMax] = 1.0
# Raw: no LUT needed at all
else:
assert self._postProcessingMethod == "raw"
postProcLUT = None
# If we are in 'dual' phase mode, then we'll reflect
# the LUT on the negative side of zero to speed up
# processing inside the C function.
if False:
if postProcLUT is not None and self._phaseMode == 'dual':
# Make a reflected LUT
comboLut = numpy.concatenate((numpy.fliplr(postProcLUT[numpy.newaxis,:]),
postProcLUT[numpy.newaxis,:]),
axis=1)
# Now clone the reflected LUT and clip it's responses
# for positive and negative phases
postProcLUT = numpy.concatenate((comboLut, comboLut), axis=1).reshape(4*numLutBins)
# First half of it is for positive phase
postProcLUT[:numLutBins] = 0.0
# Second half of it is for negative phase
postProcLUT[-numLutBins:] = 0.0
# Store our LUT and it's pre-scaling factor
self._postProcLUT = postProcLUT
self._postProcLutScalar = postProcScalar
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _allocBuffers(self):
"""
Allocate some working buffers that are required
by the C implementation.
"""
# Allocate working buffers to be used by the C implementation
#self._buffers = [numpy.zeros(inputDim, dtype=numpy.int32) for inputDim in self._inputDims]
# Compute how much "padding" ou input buffers
# we will need due to boundary effects
if self._boundaryMode == 'sweepOff':
padding = self._filterDim - 1
else:
padding = 0
# For each scale, allocate a set of buffers
# Allocate a working "input buffer" of unsigned int32
# We want our buffers to have rows that are aligned on 16-byte boundaries
#self._bufferSetIn = []
#for inHeight, inWidth in self._inputDims:
# self._bufferSetIn = numpy.zeros((inHeight + padding,
# _alignToFour(inWidth + padding)),
# dtype=numpy.int32)
self._bufferSetIn = [numpy.zeros((inHeight + padding,
self._alignToFour(inWidth + padding)),
dtype=numpy.int32) \
for inHeight, inWidth in self._inputDims]
# Allocate a working plane of "output buffers" of unsigned int32
# We want our buffers to have rows that are aligned on 16-byte boundaries
#self._bufferSetOut = []
#for outHeight, outWidth in self._outputDims:
# self._bufferSetOut += numpy.zeros((self._numOrientations,
# outHeight,
# _alignToFour(outWith)),
# dtype=numpy.int32)
numBuffersNeeded = self._numOrientations
if self._centerSurround:
numBuffersNeeded += 1
self._bufferSetOut = [numpy.zeros((numBuffersNeeded,
outHeight,
self._alignToFour(outWidth)),
dtype=numpy.int32) \
for outHeight, outWidth in self._outputDims]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _initEphemerals(self):
self._gaborComputeProc = None
# For (optional) debug logging, we keep track of the number of
# images we have seen
self._imageCounter = 0
self._bufferSetIn = None
self._bufferSetOut = None
self._morphHeader = None
self._erosion = None
self._numScales = None
self._inputDims = None
self._outputDims = None
self._minInputDim = None
self._minOutputDim = None
self._inHeight = None
self._inWidth = None
self._outHeight = None
self._outWidth = None
self._postProcLUT = None
self._postProcLutScalar = None
self._filterPhase = None
self.response = None
self._responseImages = None
self._makeResponseImages = None
self.tdInput = None
self.selectedBottomUpOut = None
self._tdThreshold = None
self._morphHeader = None
if not hasattr(self, '_numPlanes'):
self._numPlanes = None
# Assign default values to missing parameters
for paramName, paramValue in self._defaults.items():
paramName = self._stripHidingPrefixIfPresent(paramName)
if not hasattr(self, "_%s" % paramName):
exec("self._%s = paramValue" % paramName)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getEphemeralMembers(self):
"""
Callback (to be overridden) allowing the class to publish a list of
all "ephemeral" members (i.e., data members that should not and/or
cannot be pickled.)
"""
# We can't pickle a pointer to a C function
return [
'_gaborComputeProc',
'_bufferSetIn',
'_bufferSetOut',
'_imageCounter',
'_morphHeader',
'_erosion',
]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _loadLibrary(self, libraryName, libSubDir=None):
"""
Utility method for portably loading a NuPIC shared library.
Note: we assume the library lives in the NuPIC "lib" directory.
@param: libraryName - the name of the library (sans extension)
@returns: reference to the loaded library; otherwise raises
a runtime exception.
"""
# By default, we will look for our shared library in our
# bindings directory.
if not libSubDir:
libSubDir = "bindings"
# Attempt to load the library
try:
# All of these shared libraries are python modules. Let python find them
# for us. Once it finds us the path, we'll load it with CDLL.
dottedPath = ('.'.join(['nupic', libSubDir, libraryName]))
exec("import %s" % dottedPath)
libPath = eval("%s.__file__" % dottedPath)
lib = ctypes.cdll.LoadLibrary(libPath)
# These calls initialize the logging system inside
# the loaded library. Disabled for now.
# See comments at INIT_FROM_PYTHON in gaborNode.cpp
# pythonSystemRefP = PythonSystem.getInstanceP()
# lib.initFromPython(ctypes.c_void_p(pythonSystemRefP))
return lib
except Exception, e:
print "Warning: Could not load shared library: %s" % libraryName
print "Exception: %s" % str(e)
return None
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def compute(self, inputs, outputs):
"""
Run one iteration of fat node, profiling it if requested.
Derived classes should NOT override this method.
The guts of the compute are contained in the _compute() call so that
we can profile it if requested.
"""
# Modify this line to turn on profiling for a given node. The results file
# ('hotshot.stats') will be sensed and printed out by the vision framework's
# RunInference.py script and the end of inference.
# Also uncomment the hotshot import at the top of this file.
if False:
if self._profileObj is None:
self._profileObj = hotshot.Profile("hotshot.stats", 1, 1)
# filename, lineevents, linetimings
self._profileObj.runcall(self._gaborCompute, *[inputs, outputs])
else:
self._gaborCompute(inputs, outputs)
self._imageCounter += 1
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _getUpperLeftPixelValue(self, inputs, validAlpha=None):
    """
    Extract the intensity value of the upper-left pixel.

    @param inputs: dict of input buffers; 'bottomUpIn' is read, and
          'validRegionIn' (left, top, right, bottom) is honored if present.
    @param validAlpha: optional alpha-validity array; when supplied, the
          first pixel whose alpha is >= 0.5 is used instead of the
          bounding-box corner.
          NOTE(review): indexed as validAlpha[0,0] (2-D) but searched with
          numpy.where(...)[0] as if flat -- confirm expected shape.
    @returns the pixel intensity to use as the "color key".
    """
    # Obtain raw input pixel data
    #buInputVector = inputs['bottomUpIn'][0].array()
    buInputVector = inputs['bottomUpIn']
    # Respect valid region for selection of
    # color key value
    pixelIndex = 0
    # If we have an alpha channel, then we need to find
    # the first pixel for which the alpha is nonzero
    if validAlpha is not None:
      # Temporarily decode the polarity that is stored
      # in the first alpha element (negative values encode an indicator;
      # -1.0 - value recovers the underlying alpha)
      indicatorValue = validAlpha[0,0]
      if indicatorValue < 0.0:
        validAlpha[0,0] = -1.0 - indicatorValue
      alphaLocns = numpy.where(validAlpha >= 0.5)[0]
      # Put the indicator back
      validAlpha[0,0] = indicatorValue
      # If there are no positive alpha pixels anywhere, then
      # just use white (255) as the color key (which may not
      # be the "correct" thing to do, but we have no other
      # options really.
      if len(alphaLocns) == 0:
        return 255.0;
      pixelIndex = alphaLocns[0]
    # Otherwise, if we have a bounding box, then we
    # need to find the first (upper-left) pixel in
    # the valid bounding box
    elif 'validRegionIn' in inputs:
      #validRegionIn = inputs['validRegionIn'][0].array()
      validRegionIn = inputs['validRegionIn']
      left = int(validRegionIn[0])
      top = int(validRegionIn[1])
      if left > 0 or top > 0:
        # Convert (left, top) into a flat index into the input vector
        pixelIndex = left + top * int(self._inWidth)
    return buInputVector[pixelIndex]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _gaborCompute(self, inputs, outputs):
    """
    Run one iteration of multi-node.
    We are taking the unconventional approach of overridding the
    base class compute() method in order to avoid applying the
    splitter map, since this is an expensive process for a densely
    overlapped node such as GaborNode.

    @param inputs: dict of input buffers; 'bottomUpIn' is required,
          'validRegionIn' and 'validAlphaIn' are optional.
    @param outputs: dict of output buffers, filled by _computeWithC()
          or by the top-down path.
    """
    # Build our Gabor Bank (first time only)
    self._buildGaborBankIfNeeded()
    # If we are using "color-key" mode, then detect the value of
    # the upper-left pixel and use it as the value of
    # 'offImagePixelValue'
    if self._offImagePixelValue == "colorKey":
      offImagePixelValue = self._getUpperLeftPixelValue(inputs)
    else:
      offImagePixelValue = float(self._offImagePixelValue)
    # Fast C implementation
    # Get our inputs into numpy arrays
    buInputVector = inputs['bottomUpIn']
    validRegionIn = inputs.get('validRegionIn', None)
    # Obtain access to valid alpha region, if it exists
    # and if we are configured to use the pixel-accurate
    # alpha validity mask (as opposed to using the
    # valid bounding box.)
    if self._suppressByAlpha and 'validAlphaIn' in inputs:
      if self._numScales > 1:
        raise NotImplementedError("Multi-scale GaborNodes cannot currently handle alpha channels")
      # We assume alpha channels are expressed in a format in
      # which '0.0' corresponds to total suppression of
      # responses, and '255.0' corresponds to no suppression
      # whatsoever, and intermediate values apply a linearly
      # proportional degree of suppression (e.g., a value of
      # '127.5' would result in a 50% suppression of the
      # raw responses.)
      #validAlpha = inputs['validAlphaIn'][0].array()[:, numpy.newaxis] * (1.0/255.0)
      validAlpha = inputs['validAlphaIn'][:, numpy.newaxis] * (1.0/255.0)
      # If we are using an alpha channel, then it will take
      # a bit more work to find the correct "upper left"
      # pixel because we can't just look for the first
      # upper-left pixel in the valid bounding box; we have
      # to find the first upper-left pixel in the actual
      # valid alpha zone.
      if self._offImagePixelValue == "colorKey":
        offImagePixelValue = self._getUpperLeftPixelValue(inputs, validAlpha)
    else:
      validAlpha = None
    if self.nta_phaseIndex == 0:  # Do bottom-up inference.
      self._computeWithC(buInputVector, validRegionIn,
                         outputs, offImagePixelValue, validAlpha)
      # Cache the input so a later top-down pass can verify it is
      # operating on the same image.  The output is already stored
      # in self.response.
      if self._topDownCombiner is not None and self._stage == 'infer':
        self._cachedBUInput = buInputVector
        self._cachedValidRegionIn = validRegionIn
    else:  # Try top-down inference.
      # Compare the current input against the cached bottom-up input;
      # an empty array stands in for "nothing cached"
      cachedBUInput = self._cachedBUInput \
          if self._cachedBUInput is not None else numpy.zeros(0)
      validCachedBUInput = numpy.array_equal(buInputVector, cachedBUInput)
      cachedValidRegionIn = self._cachedValidRegionIn \
          if self._cachedValidRegionIn is not None else numpy.zeros(0)
      validCachedValidRegionIn = ((validRegionIn is None) or
                                  numpy.array_equal(validRegionIn, cachedValidRegionIn))
      # See if we can use the cached values from the last bottom up compute. For better performance,
      # we only perform the cache checking when we know we might have top down computes.
      topDownConditionsMet = (self.nta_phaseIndex == 1) and \
                             (self._stage == 'infer') and \
                             (self._topDownCombiner is not None) and \
                             validCachedBUInput and validCachedValidRegionIn
      if not topDownConditionsMet:
        # Emit a diagnostic explaining exactly which condition failed,
        # then bail out of the top-down pass
        message = (
          ("Top-down conditions were not met for GaborNode:\n") +
          ("  phaseIndex=%s (expected %d)\n" % (self.nta_phaseIndex, 1)) +
          ("  stage='%s' (expected '%s')\n" % (self._stage, "infer")) +
          ("  topDownCombiner is %s (expected not None)\n" %
           ("not None" if (self._topDownCombiner is not None) else "None")) +
          ("  buInputVector %s cache (expected ==)\n" %
           ("==" if validCachedBUInput else "!=")) +
          ("  validRegionIn %s cache (expected ==)\n" %
           ("==" if validCachedValidRegionIn else "!="))
        )
        import warnings
        warnings.warn(message, stacklevel=2)
        return
      # No need to copy to the node outputs, they should be the same as last time.
      # IMPORTANT: When using the pipeline scheduler, you MUST write to the output buffer
      # each time because there are 2 output buffers. But, we know that for feedback
      # networks, the pipleline scheduler cannot and will not be used, so it's OK to
      # skip the write to the output when we have top down computes.
      # Perform the topDown compute instead
      #print "Gabor topdown"
      buOutput = self.response.reshape(self._inputSplitter.shape[0], self._numPlanes)
      PyRegion._topDownCompute(self, inputs, outputs, buOutput,
                               buInputVector)
    # DEBUG DEBUG
    #self._logPrefix = "debug"
    #print "WARNING: using a hacked version of GaborNode.py [forced logging]"
    # Write debugging images
    if self._logPrefix is not None:
      self._doDebugLogging()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _doDebugLogging(self):
"""
Dump the most recently computed responses to logging image files.
"""
preSuppression = False
# Make the response images if they haven't already been made
if not self._makeResponseImages:
self._genResponseImages(self.response, preSuppression=False)
# Write the response images to disk
imageSet = self._responseImages[self._getResponseKey(preSuppression=False)]['bottomUp']
for orient, orientImages in imageSet.items():
for scale, image in orientImages.items():
if type(scale) == type(0):
if type(orient) == type(0):
orientCode = "%02d" % orient
else:
orientCode = "%s" % orient
debugPath = "%s.img-%04d.scale-%02d.orient-%s.png" % (self._logPrefix,
self._imageCounter,
scale, orientCode)
self.deserializeImage(image).save(debugPath)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def filter(self, image, validRegionIn=None,
                   orientation='all', phase=0,
                   scaleIndex=0,
                   cachedResponse=None,
                   gain=1.0):
    """
    Perform gabor filtering on a PIL image, and return a PIL
    image containing the composite responses.

    @param image: grayscale PIL image whose size must match the
          input dims registered for 'scaleIndex'.
    @param validRegionIn: [left, top, right, bottom]; defaults to the
          full image.
    @param orientation: 'all' for a composite of every response plane,
          'centerSurround', or an integer plane index.
    @param phase: when > 0, selects the opposite-phase plane for an
          individual (non-'all') response.
    @param scaleIndex: which multi-resolution scale to process.
    @param cachedResponse: optional previously computed response array,
          which skips the gabor convolution entirely.
    @param gain: multiplicative factor applied when rendering responses
          into 8-bit pixels.
    @returns (finalResponse, response): the rendered PIL image and the
          raw response array (suitable for reuse as 'cachedResponse').
    """
    if validRegionIn is None:
      validRegionIn = (0, 0, image.size[0], image.size[1])
    # Decide whether or not to use numpy
    self._buildGaborBankIfNeeded()
    # Determine proper input/output dimensions
    inHeight, inWidth = self._inputDims[scaleIndex]
    outHeight, outWidth = self._outputDims[scaleIndex]
    inputSize = inHeight * inWidth
    outputSize = outHeight * outWidth * self._numPlanes
    inputVector = numpy.array(image.getdata()).astype(RealNumpyDType)
    inputVector.shape = (inHeight, inWidth)
    # The image must match the registered dims for this scale
    assert image.size[1] == inHeight
    assert image.size[0] == inWidth
    # Locate correct portion of output
    outputVector = numpy.zeros((outHeight, outWidth, self._numPlanes), dtype=RealNumpyDType)
    outputVector.shape = (self._numPlanes, outHeight, outWidth)
    inputVector.shape = (inHeight, inWidth)
    # Use a provided responses
    if cachedResponse is not None:
      response = cachedResponse
    # If we need to re-generate the gabor response cache:
    else:
      # If we are using "color-key" mode, then detect the value of
      # the upper-left pixel and use it as the value of
      # 'offImagePixelValue'
      if self._offImagePixelValue == "colorKey":
        # Respect valid region for selection of
        # color key value
        [left, top, right, bottom] = validRegionIn
        offImagePixelValue = inputVector[top, left]
        #offImagePixelValue = inputVector[0, 0]
      else:
        offImagePixelValue = self._offImagePixelValue
      # Extract the bounding box signal (if present), expressed as
      # fractions of the input dimensions
      validPyramid = validRegionIn / numpy.array([self._inWidth,
                                                  self._inHeight,
                                                  self._inWidth,
                                                  self._inHeight],
                                                  dtype=RealNumpyDType)
      # Compute the bounding box to use for our C implementation
      bbox = self._computeBBox(validPyramid, outWidth, outHeight)
      imageBox = numpy.array([0, 0, self._inputDims[scaleIndex][1],
                              self._inputDims[scaleIndex][0]],
                              dtype=numpy.int32)
      # Perform gabor processing
      self._doGabor(inputVector, bbox, imageBox, outputVector, scaleIndex, offImagePixelValue)
      # Re-order planes-first output into location-major (row, col, plane)
      # layout, then flatten into one response row per output location
      outputVector = numpy.rollaxis(outputVector, 0, 3)
      outputVector = outputVector.reshape(outWidth * outHeight, self._numPlanes).flatten()
      assert outputVector.dtype == RealNumpyDType
      numLocns = len(outputVector) / self._numPlanes
      response = outputVector.reshape(numLocns, self._numPlanes)
    nCols, nRows = self._outputPyramidTopology[scaleIndex]['numNodes']
    startNodeIdx, stopNodeIdx = self._getNodeRangeByScale(scaleIndex)
    # Make composite response
    if orientation == 'all':
      # Build all the single-orientation responses
      responseSet = []
      for responseIdx in xrange(self._numPlanes):
        img = Image.new('L', (nCols, nRows))
        img.putdata((gain * 255.0 * response[:stopNodeIdx-startNodeIdx, responseIdx]).astype(numpy.uint8))
        responseSet += [img]
      finalResponse = self._makeCompositeImage(responseSet)
    # Make an individual response
    else:
      img = Image.new('L', (nCols, nRows))
      # Map symbolic/phased orientation selections onto flat plane indices
      if orientation == 'centerSurround':
        orientation = self._numOrientations
      if phase > 0:
        orientation += self._numOrientations
        if self._centerSurround:
          orientation += 1
      img.putdata((gain * 255.0 * response[:stopNodeIdx-startNodeIdx, orientation]).astype(numpy.uint8))
      finalResponse = img
    return finalResponse, response
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _buildGaborBankIfNeeded(self):
"""
Check to see if we have a Gabor Bank, and if not, then build it.
"""
if self._gaborBank is None:
self._buildGaborBank()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _doCompute(self, rfInput, rfMask, rfSize, resetSignal, validPyramid):
    """
    Actual compute() implementation (pure Python/numpy code path).

    NOTE(review): the original docstring documented the parameters under
    different names ('inputPyramid', 'reset'); the descriptions below are
    keyed to the real parameter names.  Also note that rfMask, rfSize and
    resetSignal are accepted but never referenced in this implementation.

    @param rfInput -- a 2D numpy array containing planes of the
        input pyramid, one output location per row.
    @param rfMask -- a 2-dimensional numpy array (of same shape as 'rfInput')
        that contains a value of 0.0 for every element that corresponds
        to a padded "dummy" (sentinel) value within 'rfInput', and
        a value of 1.0 for every real input element.  (Unused here.)
    @param rfSize -- a 1-dimensional numpy array (same number of rows as
        'rfInput') containing the total number of real (non-dummy)
        elements for each row of 'rfInput'.  (Unused here.)
    @param resetSignal -- boolean indicating whether the current input is the
        first of a new temporal sequence.  (Unused here.)
    @param validPyramid -- a 4-element numpy array (vector) that specifies the
        zone in which the input pyramid is "valid".  A point in the
        pyramid is "valid" if that point maps to a location in the
        original image, rather than a "padded" region that was added
        around the original image in order to scale/fit it into the
        dimensions of the input pyramid.
        The 4-element array is in the following format:
          [left, top, right, bottom]
        where 'left' is the fraction (between 0 and 1) of the width of
        the image where the valid zone begins, etc.
    Returns:
      responseFinal -- a 2D numpy array of post-processed responses
        (also stored in self.response).  NOTE(review): despite the
        original doc (which promised "a list of numpy arrays"), a single
        2D array is returned.
    """
    # NOTE(review): numGaborFilters is computed but never used below.
    numGaborFilters = self._gaborBank.shape[1]
    numOutputLocns = rfInput.shape[0]
    # ---------------------------------------------------------------
    # Conceptual pipeline:
    #
    # 1. Apply Gabor filtering upon the input pixels X to
    #    generate raw responses Y0  Even in dual-phase mode,
    #    we will only need to perform the actual computations
    #    on a single phase (because the responses can be inverted).
    #
    # 2. Rectify the raw Gabor responses Y0 to produce rectified
    #    responses Y1.
    #
    # 3. Apply an adaptive normalization operation to the
    #    rectified responses Y1 to produce Y2.
    #
    # 4. Amplify the normalized responses Y2 by a fixed gain G
    #    to produce amplified responses Y3.
    #
    # 5. Apply post-processing upon the amplified responses Y3 to
    #    produce final responses Z.
    #
    #----------------------------------
    # Step 1 - Raw Gabor filtering:
    # Convolve each output location against the complete gabor bank.
    # (The "convolution" is a single matrix product because each row of
    # rfInput already holds the flattened receptive field pixels.)
    responseRaw = numpy.dot(rfInput, self._gaborBank)
    #----------------------------------
    # Step 2 - Rectify responses:
    # Large finite bound used instead of +inf for clip().
    effectiveInfinity = 1.0e7
    if self._phaseMode == 'single':
      responseRectified = numpy.abs(responseRaw)
    elif self._phaseMode == 'dual':
      # Dual phase: positive lobe and negated-negative lobe become
      # separate response planes (doubling the plane count).
      responseRectified = numpy.concatenate((responseRaw.clip(min=0.0, max=effectiveInfinity),
                                            (-responseRaw).clip(min=0.0, max=effectiveInfinity)),
                                            axis=1)
    #----------------------------------
    # Step 3 - Adaptive normalization:
    # Step 4 - Amplification
    # If we are not doing any normalization, then it is easy:
    if self._normalizationMethod == 'fixed':
      # In 'fixed' mode, we simply apply a default normalization
      # that takes into account the fact that the input range
      # lies between 0 and 255.
      responseAmplified = responseRectified * (self._gainConstant / 255.0)
    # Otherwise, we have to perform normalization
    else:
      # First we'll apply the power rule, if needed
      if self._normalizationMethod in ['meanPower', 'maxPower']:
        responseToUse = (responseRectified * responseRectified)
      elif self._normalizationMethod in ['mean', 'max']:
        responseToUse = responseRectified
      # At this point, our responseRectified array is of
      # the shape (totNumOutputLocns, numOrients)
      # First, we will perform the max/mean operation over
      # the spatial dimensions; the result will be an
      # intermediate array of the shape:
      #   (numScales, numOrients) which will contain the
      # max/mean over the spatial dimensions for each
      # scale and orientation.
      numLayers = len(self._inputPyramidTopology)
      layerOffsets = self._computeLayerOffsets(self._inputPyramidTopology)
      responseStats = []
      for k in xrange(numLayers):
        startOffset = layerOffsets[k]
        stopOffset = layerOffsets[k+1]
        if self._normalizationMethod in ['max', 'maxPower']:
          responseStats += [responseToUse[startOffset:stopOffset].max(axis=0)[numpy.newaxis, :]]
        elif self._normalizationMethod in ['mean', 'meanPower']:
          responseStats += [responseToUse[startOffset:stopOffset].mean(axis=0)[numpy.newaxis, :]]
      responseStats = numpy.array(responseStats).reshape(numLayers, self._numPlanes)
      # This should be a numpy array containing the desired statistics
      # over the spatial dimensions; one statistic for each tuple
      # of (scale, orientation)
      # If we used a power law, then take the square root of the statistics
      if self._normalizationMethod in ['maxPower', 'meanPower']:
        responseStats = numpy.sqrt(responseStats)
      # Compute statistics over orientation (if needed)
      if not self._perOrientNormalization:
        if self._normalizationMethod in ['max', 'maxPower']:
          responseStats = responseStats.max(axis=1)
        elif self._normalizationMethod in ['mean', 'meanPower']:
          responseStats = responseStats.mean(axis=1)
        responseStats = responseStats[:, numpy.newaxis]
      # At this point, responseStats is of shape: (numLayers, 1)
      # Compute statistics over scale (if needed)
      if not self._perScaleNormalization:
        if self._normalizationMethod in ['max', 'maxPower']:
          responseStats = responseStats.max(axis=0)
        elif self._normalizationMethod in ['mean', 'meanPower']:
          responseStats = responseStats.mean(axis=0)
        # Expand back out for each scale
        responseStats = responseStats[numpy.newaxis, :] * numpy.ones((numLayers, 1))
        # Expand back out for each orientation
        if not self._perOrientNormalization:
          responseStats = responseStats[:, numpy.newaxis] * numpy.ones((1, self._numPlanes))
      # Step 4 - Amplification
      # gain starts at the fixed gain constant and is divided by the
      # per-(scale, orientation) statistic wherever that statistic is
      # positive (zero statistics keep the raw gain, avoiding div-by-zero).
      responseStats = responseStats.reshape(numLayers, self._numPlanes)
      gain = self._gainConstant * numpy.ones((numLayers, self._numPlanes), dtype=RealNumpyDType)
      nonZeros = numpy.where(responseStats > 0.0)
      gain[nonZeros] /= responseStats[nonZeros]
      # Fast usage case: neither per-scale nor per-orient normalization
      if not self._perScaleNormalization and not self._perOrientNormalization:
        responseAmplified = responseRectified * gain[0, 0]
      # Somewhat slower: per-orient (but not per-scale) normalization
      elif not self._perScaleNormalization:
        responseAmplified = responseRectified * gain[0, :]
      # Slowest: per-scale normalization
      # NOTE(review): repeated concatenation in this loop is O(n^2) in the
      # number of layers; acceptable for small pyramids, but a list-append
      # + single concatenate would scale better.
      else:
        responseAmplified = None
        for k in xrange(numLayers):
          startOffset = layerOffsets[k]
          stopOffset = layerOffsets[k+1]
          if not self._perOrientNormalization:
            gainToUse = gain[k, 0]
          else:
            gainToUse = gain[k, :]
          thisResponse = responseRectified[startOffset:stopOffset, :] * gainToUse
          if responseAmplified is None:
            responseAmplified = thisResponse
          else:
            responseAmplified = numpy.concatenate((responseAmplified, thisResponse), axis=0)
    #----------------------------------
    # Step 5 - Post-processing
    # No post-processing (linear)
    if self._postProcessingMethod == "raw":
      responseFinal = responseAmplified
    # Sigmoidal post-processing: logistic centered at _postProcessingCenter
    # with slope _postProcessingSlope, rescaled so that input 0 maps to 0.
    # The clip to [-40, 40] guards exp() against overflow.
    elif self._postProcessingMethod == "sigmoid":
      offset = 1.0 / (1.0 + numpy.exp(self._postProcessingSlope * self._postProcessingCenter))
      scaleFactor = 1.0 / (1.0 - offset)
      responseFinal = ((1.0 / (numpy.exp(numpy.clip(self._postProcessingSlope \
                    * (self._postProcessingCenter - responseAmplified), \
                    -40.0, 40.0)) + 1.0)) - offset) * scaleFactor
    # Piece-wise linear post-processing
    # NOTE(review): responseFinal aliases responseAmplified here, so these
    # writes also mutate responseAmplified (harmless: it is not used again).
    elif self._postProcessingMethod == "threshold":
      responseFinal = responseAmplified
      responseFinal[responseAmplified < self._postProcessingMin] = 0.0
      responseFinal[responseAmplified > self._postProcessingMax] = 1.0
    #----------------------------------
    # Optional: Dump statistics for comparative purposes
    #self._dumpStats(responseFinal, "gabor.stats.txt")
    # Generate raw response images (prior to suppression)
    if self._makeResponseImages:
      self._genResponseImages(responseFinal, preSuppression=True)
    # Apply suppression to responses outside valid pyramid.
    if self._suppressOutsideBox:
      self._applyValiditySuppression(responseFinal, validPyramid)
    # Perform the zeroOutThreshold clipping now if requested
    if self._zeroThresholdOut > 0.0:
      # Get the max of each node
      nodeMax = responseFinal.max(axis=1).reshape(numOutputLocns)
      # Zero out children where all elements are below the threshold
      responseFinal[nodeMax < self._zeroThresholdOut] = 0
    # Generate final response images (after suppression)
    if self._makeResponseImages:
      self._genResponseImages(responseFinal, preSuppression=False)
    # Store the response so that it can be retrieved later
    self.response = responseFinal
    return responseFinal
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _applyValiditySuppression(self, response, validPyramid):
"""
Apply suppression to responses outside valid pyramid.
This overrides the default PyRegion implementation.
"""
# We compute the valid fraction of each output locations' RF by
# computing the valid fraction of it's spatial dimension.
# @todo -- Generalize this to handle more than two spatial dimensions.
validX = (self._rfMaxX.clip(min=validPyramid[0], max=validPyramid[2]) - \
self._rfMinX.clip(min=validPyramid[0], max=validPyramid[2])) * \
self._rfInvLenX
validY = (self._rfMaxY.clip(min=validPyramid[1], max=validPyramid[3]) - \
self._rfMinY.clip(min=validPyramid[1], max=validPyramid[3])) * \
self._rfInvLenY
# At this point the validX and validY numpy vectors contain values
# between 0 and 1 that encode the validity of each output location
# with respect to the X and Y spatial dimensions, respectively.
# Now we map the raw validities of each output location into
# suppression factors; i.e., a scalar (for each output location)
# that will be multiplied against each response for that particular
# output location.
# Use a hard threshold:
# Discovered a nasty, subtle bug here. The code used to be like this:
#
# suppressionFactor = ((validX * validY) >= self._validitySuppressionLow).astype(RealNumpyDType)
#
# However, in the case of validitySuppressionLow of 1.0, numpy experienced
# "random" roundoff errors, and nodes for which both validX and validY were
# 1.0 would be computed as 1 - epsilon, which would fail the test against
# validitySuppressionLow, and thus get suppressed incorrectly.
# So we introduced an epsilon to deal with this situation.
suppressionFactor = ((validX * validY) + self._epsilon >= \
self._validitySuppressionLow).astype(RealNumpyDType)
# Apply the suppression factor to the output response array
response *= suppressionFactor[:, numpy.newaxis]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _dumpStats(self, response, statsLogPath):
"""
In order to do a kind of "unit testing" of the GaborNode
tuning parameters for a particular application, it is useful
to dump statistics on the responses at different scales
and orientations/phases.
We'll dump the following statistics for each (scale, orientation) tuple:
* response mean
* response standard deviation
* power mean (squared response mean)
* response max
@param response -- response array of shape (totNumOutputLocns, numOrients)
"""
meanResponse = []
meanPower = []
stddevResponse = []
maxResponse = []
# Compute a squared (power) response
power = response * response
# Compute our mean/max/stddev statistics over the spatial dimensions
# for each scale and for each orientation. The result will be four
# array of shape: (numScales, numOrients) which will contain the
# statistics over the spatial dimensions for each scale and orientation.
numLayers = len(self._outputPyramidTopology)
layerOffsets = self._computeLayerOffsets(self._outputPyramidTopology)
for k in xrange(numLayers):
startOffset = layerOffsets[k]
stopOffset = layerOffsets[k+1]
# Mean response
meanResponse += [response[startOffset:stopOffset].mean(axis=0)[numpy.newaxis, :]]
# Max response
maxResponse += [response[startOffset:stopOffset].max(axis=0)[numpy.newaxis, :]]
# Std. deviation response
stddevResponse += [response[startOffset:stopOffset].std(axis=0)[numpy.newaxis, :]]
# Mean power
meanPower += [power[startOffset:stopOffset].mean(axis=0)[numpy.newaxis, :]]
# Now compile the responses at each scale into overall arrays
# of shape: (numScales, numOrientations)
meanResponse = numpy.array(meanResponse).reshape(numLayers, self._numPlanes)
maxResponse = numpy.array(maxResponse).reshape(numLayers, self._numPlanes)
stddevResponse = numpy.array(stddevResponse).reshape(numLayers, self._numPlanes)
meanPower = numpy.array(meanPower).reshape(numLayers, self._numPlanes)
# Finally, form the different statistics into a single desriptive vector
responseStats = numpy.concatenate((meanResponse[numpy.newaxis,:,:],
maxResponse[numpy.newaxis,:,:],
stddevResponse[numpy.newaxis,:,:],
meanPower[numpy.newaxis,:,:]), axis=0)
# Append to the stats log
fpStatsLog = open(statsLogPath, "a")
response = " ".join(["%f" % x for x in responseStats.flatten().tolist()])
fpStatsLog.write(response + "\n")
fpStatsLog.close()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _doTopDownInfer(self, tdInput, tdNumParents, buOutput, buInput):
    """
    Actual top down compute() implementation. This is a placeholder that should
    be overridden by derived sub-classes.
    @param tdInput -- a 3D array containing the top-down inputs to each baby node.
            Think of this as N 2D arrays, where N is the number of baby nodes.
            Each baby node's 2D array has R rows, where each row is the top-down
            output from one of the parents. The width of each row is equal to the
            width of the bottomUpOut of the baby node.  If a baby node
            has only 2 parents, but R is 5 for example, then the last 3 rows
            of the 2D array will contain all 0's. The tdNumParents argument
            can be referenced to find out how many parents the node actually has.
            The tdInput array is structured in this manner to make it easy to
            sum the contributions from the parents. All the sub-class needs to
            do is a numpy.add.reduce(tdInput, axis=1).
    @param tdNumParents a vector whose length is equal to the number of baby nodes. Each
            element contains the number of parents of each baby node.
    @param buInput -- a 2D array containing the bottom-up inputs to each baby node.
            This is the same input that is passed to the _doCompute() method,
            but it is called rfInput there.  (Unused in this implementation.)
    @param buOutput -- a 2D array containing the results of the bottomUp compute for
            this node. This is a copy of the return value returned from the
            _doCompute method of the node.
    Returns:
      tdOutput -- a 2-D numpy array containing the outputs from each baby node. Each
            row is a baby node output.
    Side effects: sets self.tdInput, self.selectedBottomUpOut (only when the
    top-down input is non-blank) and self._maxTopDownOut (always).
    """
    # NOTE: Making this a float32 makes the copy to the node outputs at the end of
    # the compute faster.
    #tdOutput = numpy.zeros(self._inputSplitter.shape, dtype='float32')
    # print "Top-down infer called on a Gabor node. Use breakpoint to step through"
    # print "and make sure things are as expected:"
    # import pdb; pdb.set_trace()
    numBabyNodes = len(tdInput)
    numOrients = len(tdInput[0][0])
    assert self._numPlanes == numOrients # Number of filters must match top-down input
    # Default threshold mask: all-pass (ones) unless overwritten below.
    tdThreshold = numpy.ones((numBabyNodes, numOrients))
    # Feature switches for the processing below; hard-coded to enable all
    # three behaviors (thresholding, combining, normalization).
    version=('tdThreshold', 'combine', 'td_normalize')
    minResponse=1e-10
    # Average top-down inputs for each baby Node
    tdInput_avg = numpy.add.reduce(tdInput, axis=1) / tdNumParents
    # For the gabor node, we will usually get 1 orientation fed down from
    # the complex level above us. This is because the SparsePooler above that
    # sparsified it's inputs and only saves one orientation from each complex node.
    # But, for the Gabor node which is at the bottom of the hierarchy, it makes more
    # sense to spread the topdown activation among all the orientations since
    # each gabor covers only a few pixels and won't select one object from another.
    # (Implemented by overwriting every orientation with the per-node max.)
    tdMaxes = tdInput_avg.max(axis=1)
    tdInput_avg *= 0
    tdInput_avg += tdMaxes.reshape(-1,1)
    if tdInput_avg.max() <= minResponse:
      # Blank top-down input: skip all processing; tdThreshold stays all-ones.
      #print "Top-down Input is Blank"
      pass
    else:
      if 'combine' in version: # Combine top-down and bottom-up inputs
        tdInput_avg *= buOutput
      if 'td_normalize' in version: # Normalize top-down inputs for viewing
        # td_max = tdInput_avg.max()
        # tdInput_avg /= td_max
        td_max = tdInput_avg.max()
        if td_max != 0:
          tdInput_avg /= td_max
      if 'tdThreshold' in version: # Use tdInput_avg to threshold bottomUp outputs
        # Lazily default the threshold; _tdThreshold may be set externally.
        if not hasattr(self, '_tdThreshold'):
          self._tdThreshold = 0.01
        tdThreshold = tdInput_avg > self._tdThreshold
      self.tdInput = tdInput_avg
      # Gate the bottom-up output by the threshold mask and renormalize to 1.
      self.selectedBottomUpOut = buOutput * tdThreshold
      theMax = self.selectedBottomUpOut.max()
      if theMax > 0:
        self.selectedBottomUpOut /= theMax
    # Generate response images
    if self._makeResponseImages:
      self._genResponseImages(self.tdInput, preSuppression=False, phase='topDown')
      self._genResponseImages(self.selectedBottomUpOut, preSuppression=False,
                              phase='combined')
    # Generate the topDown outputs.  At this point, tdMaxes contains the max gabor orientation
    # output from each baby node. We will simply "spread" this value across all of the
    # topDown outputs for each baby node as an indication of their input activation level.
    # In a perfect world, you would try and reconstruct the input by summing the inverse of the
    # gabor operation for each output orientation. But, for now, we are only using the top
    # down output of the Gabor as an indication of the relative input strength to each gabor
    # filter - essentially as a mask on the input image.
    tdOutput = numpy.ones(self._inputSplitter.shape, dtype='float32')
    tdOutput *= tdMaxes.reshape(-1,1)
    # Save the maxTopDownOut for each baby node so that it can be returned as a read-only
    # parameter. This provides faster performance for things like the top down image inspector
    # that only need the max output from each node
    self._maxTopDownOut = tdMaxes
    return tdOutput
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _computeWithC(self,
                    inputPlane,
                    validRegionIn,
                    outputs,
                    offImagePixelValue,
                    validAlpha):
    """
    Perform Gabor processing using custom C library.

    @param inputPlane -- 1D float32 numpy vector holding all input pyramid
           scales concatenated (padded or packed layout, detected below).
    @param validRegionIn -- [left, top, right, bottom] valid region in input
           pixels, or None for the whole image.
    @param outputs -- dict-like holding the 'bottomUpOut' float32 output
           vector; written in place and re-assigned at the end.
    @param offImagePixelValue -- pixel value for off-image regions, passed
           through to _doGabor (which may substitute the node default).
    @param validAlpha -- optional alpha suppression channel, or None.
    """
    if validRegionIn is None:
      validRegionIn = (0, 0, self._inWidth, self._inHeight)
    # Detect the input layout: "padded" (every scale stored at full
    # base-image size) vs "packed" (each scale at its own size).
    inputLen = len(inputPlane)
    if self._inputPyramidTopology is None or \
       inputLen == self._inWidth * self._inHeight * len(self._inputPyramidTopology):
      isPadded = True
    else:
      assert inputLen == sum([lvl['numNodes'][0] * lvl['numNodes'][1] \
                             for lvl in self._inputPyramidTopology])
      isPadded = False
    # Extract the bounding box signal (if present).
    # Convert pixel coordinates to fractions of the image dimensions.
    validPyramid = validRegionIn / numpy.array([self._inWidth,
                                                self._inHeight,
                                                self._inWidth,
                                                self._inHeight],
                                                dtype=RealNumpyDType)
    # First extract a numpy array containing the entire input vector
    assert inputPlane.dtype == numpy.float32
    # Convert the output images to a numpy vector
    #outputPlane = outputs['bottomUpOut'].wvector()[:].array()
    outputPlane = outputs['bottomUpOut']
    assert outputPlane.dtype == numpy.float32
    # Walk each scale, carving its slice out of the flat input/output vectors.
    inputOffset = 0
    outputOffset = 0
    for scaleIndex in xrange(self._numScales):
      # Handle padded case (normal): every scale reads base-image dims.
      if isPadded:
        inputScaleIndex = 0
      # Handle packed case (deployed): each scale has its own dims.
      else:
        inputScaleIndex = scaleIndex
      # Determine proper input/output dimensions
      inHeight, inWidth = self._inputDims[inputScaleIndex]
      outHeight, outWidth = self._outputDims[scaleIndex]
      inputSize = inHeight * inWidth
      outputSize = outHeight * outWidth * self._numPlanes
      # Locate correct portion of input (view into inputPlane, reshaped
      # in place to 2D -- no copy).
      inputVector = inputPlane[inputOffset:inputOffset+inputSize]
      inputOffset += inputSize
      inputVector.shape = (inHeight, inWidth)
      # Locate correct portion of output
      outputVector = outputPlane[outputOffset:outputOffset+outputSize]
      outputVector.shape = (self._numPlanes, outHeight, outWidth)
      # Compute the bounding box to use for our C implementation
      # NOTE(review): these use self._inputDims[scaleIndex] while
      # inHeight/inWidth above use inputScaleIndex; the two differ only in
      # the padded case -- confirm this asymmetry is intentional.
      bbox = self._computeBBox(validPyramid, self._inputDims[scaleIndex][1],
                               self._inputDims[scaleIndex][0])
      imageBox = numpy.array([0, 0, self._inputDims[scaleIndex][1],
                              self._inputDims[scaleIndex][0]],
                             dtype=numpy.int32)
      ## --- DEBUG CODE ----
      #global id
      #o = inputVector
      #print outputVector.shape, len(o)
      #f = os.path.abspath('gabor_input_%d.txt' % id)
      #print f
      #numpy.savetxt(f, o)
      #id += 1
      ##from dbgp.client import brk; brk(port=9019)
      ## --- DEBUG CODE END ----
      # Erode and/or dilate the alpha channel
      # @todo -- This should be moved into the C function
      if validAlpha is not None:
        validAlpha = self._adjustAlphaChannel(validAlpha)
      # Perform gabor processing (writes into outputVector in place)
      self._doGabor(inputVector,
                    bbox,
                    imageBox,
                    outputVector,
                    scaleIndex,
                    offImagePixelValue,
                    validAlpha)
      # Optionally, dump working buffers for debugging purposes
      if self._debugLogBuffers:
        self._logDebugBuffers(outputVector, scaleIndex);
      # Note: it would be much better if we did not have to do this
      # post-processing "transposition" operation, and instead just
      # performed all the different orientation computations for
      # each pixel.
      # Note: this operation costs us about 1 msec
      outputVector = numpy.rollaxis(outputVector, 0, 3)
      outputVector = outputVector.reshape(outWidth * outHeight, self._numPlanes)
      assert outputVector.dtype == numpy.float32
      # Perform the zeroOutThreshold clipping now if requested
      # @todo -- This should be moved into the C function
      if self._zeroThresholdOut > 0.0:
        # Get the max of each node
        nodeMax = outputVector.max(axis=1).reshape(outWidth * outHeight)
        # Zero out children where all elements are below the threshold
        outputVector[nodeMax < self._zeroThresholdOut] = 0.0
      outputPlane[outputOffset:outputOffset+outputSize] = outputVector.flatten()
      outputOffset += outputSize
    # Generate final response images (after suppression)
    if self._makeResponseImages:
      self._genResponseImages(outputPlane, preSuppression=False)
    # Store the response so that it can be retrieved later
    self.response = outputPlane
    ## --- DEBUG CODE ----
    #global id
    #o = outputPlane
    ##print outputVector.shape, len(o)
    #f = os.path.abspath('gabor_output_%d.txt' % id)
    #print f
    #numpy.savetxt(f, o)
    #id += 1
    ##from dbgp.client import brk; brk(port=9019)
    ## --- DEBUG CODE END ----
    # De-multiplex inputs/outputs
    #outputs['bottomUpOut'].wvector()[:] = outputPlane
    outputs['bottomUpOut'] = outputPlane
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _adjustAlphaChannel(self, alphaMask):
    """
    Erode or dilate an alpha suppression channel (in place) so it can be
    applied to each plane of gabor responses.

    @param alphaMask: a numpy array of shape (numPixels, 1)
          containing the alpha mask that determines which responses
          are to be suppressed.  If the values in the alpha mask
          are in the range (0.0, 255.0), then the alpha mask will
          be eroded by halfFilterDim; if the values in the alpha
          mask are in the range (-255.0, 0.0), then the mask will
          be dilated by halfFilterDim.
    Returns the adjusted mask (the same array, modified in place).
    """
    # Determine whether to erode or dilate.
    # In order to make this determination, we check
    # the sign of the first alpha pixel:
    #
    #   MorphOp   true mask[0,0]   alpha[0,0] code
    #   =======   ==============   ===============
    #   erode     0 (background)    0
    #   erode     255 (foreground)  255
    #   dilate    0 (background)    -1
    #   dilate    255 (foreground)  -256
    indicatorValue = alphaMask[0,0]
    if indicatorValue < 0.0:
      operation = 'dilate'
      # Convert the alpha value back to it's
      # true value (invert the -1-x encoding shown in the table above)
      alphaMask[0,0] = -1.0 - indicatorValue
    else:
      operation = 'erode'
    # We need to perform enough iterations to cover
    # half of the filter dimension
    # NOTE(review): integer floor division here -- this module targets
    # Python 2; under Python 3 '/' would produce a float.
    halfFilterDim = (self._filterDim - 1) / 2
    if self._morphologyMethod == "opencv" or \
       (self._morphologyMethod == "best" and cv is not None):
      # Use the faster OpenCV code path
      assert cv is not None
      # Lazily allocate the necessary OpenCV wrapper structure(s)
      self._prepMorphology()
      # Make the OpenCV image header structure's pixel buffer
      # pointer point at the underlying memory buffer of
      # the alpha channel (numpy array)
      self._morphHeader.contents.imageData = alphaMask.ctypes.data
      # Perform dilation in place
      if operation == 'dilate':
        cv.Dilate(self._morphHeader, self._morphHeader, iterations=halfFilterDim)
      # Perform erosion in place
      else:
        cv.Erode(self._morphHeader, self._morphHeader, iterations=halfFilterDim)
    else:
      # Use the custom C++ code path (lazily constructed and cached)
      if not self._erosion:
        from nupic.bindings.algorithms import Float32Erosion
        self._erosion = Float32Erosion()
        self._erosion.init(int(self._inHeight), int(self._inWidth))
      # Perform the erosion/dilation in-place
      self._erosion.compute(alphaMask,
                            alphaMask,
                            halfFilterDim,
                            (operation=='dilate'))
    # Legacy numpy method
    # NOTE(review): this edge-shaving runs after BOTH code paths above,
    # not as a third alternative -- confirm that is intended.
    # If we are in constrained mode, then the size of our
    # response planes will be less than the size of our
    # alpha mask (by halfFilterDim along each edge).
    # So we need to "shave off" halfFilterDim pixels
    # from all edges of the alpha mask before applying
    # suppression to the response planes.
    inWidth = int(self._inWidth)
    inHeight = int(self._inHeight)
    # For erosion mode, we need to shave off halfFilterDim
    # from the four edges of the alpha mask.
    if operation == "erode":
      alphaMask.shape = (inHeight, inWidth)
      alphaMask[:halfFilterDim, :] = 0.0
      alphaMask[-halfFilterDim:, :] = 0.0
      alphaMask[:, :halfFilterDim] = 0.0
      alphaMask[:, -halfFilterDim:] = 0.0
      alphaMask.shape = (inHeight * inWidth, 1)
    # For dilation mode, we need to shave off halfFilterDim
    # from any edge of the alpha mask that touches the
    # image boundary *unless* the alpha mask is "full"
    # (i.e., consumes the entire image.)
    elif operation == "dilate":
      # Handle top, bottom, left, and right
      alphaMask.shape = (inHeight, inWidth)
      zapTop = numpy.where(alphaMask[0,:])[0]
      zapBottom = numpy.where(alphaMask[-1,:])[0]
      zapLeft = numpy.where(alphaMask[:,0])[0]
      zapRight = numpy.where(alphaMask[:,-1])[0]
      # Apply zaps unless all of them are of the full
      # length possible
      if len(zapTop) < inWidth or len(zapBottom) < inWidth or \
         len(zapLeft) < inHeight or len(zapRight) < inHeight:
        alphaMask[:halfFilterDim, zapTop] = 0.0
        alphaMask[-halfFilterDim:, zapBottom] = 0.0
        alphaMask[zapLeft, :halfFilterDim] = 0.0
        alphaMask[zapRight, -halfFilterDim:] = 0.0
      alphaMask.shape = (inHeight * inWidth, 1)
    return alphaMask
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _prepMorphology(self):
"""
Prepare buffers used for eroding/dilating alpha
channels.
"""
# Check if we've already allocated a header
#if not hasattr(self, '_morphHeader'):
if not getattr(self, '_morphHeader', None):
if cv is None:
raise RuntimeError("OpenCV not available on this platform")
# Create a header only (not backed by data memory) that will
# allow us to operate on numpy arrays (valid alpha channels)
# using OpenCV operations
self._morphHeader = cv.CreateImageHeader(cv.Size(int(self._inWidth),
int(self._inHeight)), 32, 1)
# @todo: this will leak a small bit of memory every time
# we create and use a new GaborNode unless we find a way
# to guarantee the invocation of cv.ReleaseImageHeader()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _computeBBox(self, validPyramid, inWidth, inHeight):
"""
Compute a bounding box given the validPyramid (a fraction
of the valid input region as provided by the sensor) and
the output dimensions for a particular current scale.
"""
# Assemble the bounding box by converting 'validPyramid' from float (0,1) to integer (O,N)
if self._suppressOutsideBox:
halfFilterDim = (self._filterDim - 1) / 2
bbox = numpy.round((validPyramid * numpy.array([inWidth, inHeight, inWidth, inHeight],
dtype=validPyramid.dtype))).astype(numpy.int32)
# Subtract enough padding for our filter on all four edges
# We'll only subtract enough padding if we have a non-trivlal bounding box.
# In other words, if our validRegionIn is [0, 25, 200, 175] for input image
# dimensions of [0, 0, 200, 200], then we will assume that two horizontal strips
# of filler pixels were artificially added at the top and bottom, but no
# such artificial vertical strips were added. So we don't need to erode the
# bounding box horizontally, only vertically.
if self._forceBoxContraction or bbox[0] > 0:
bbox[0] += halfFilterDim
if self._forceBoxContraction or bbox[1] > 0:
bbox[1] += halfFilterDim
if self._forceBoxContraction or bbox[2] < inWidth:
bbox[2] -= halfFilterDim
if self._forceBoxContraction or bbox[3] < inHeight:
bbox[3] -= halfFilterDim
# Clip the bounding box to the size of the image
bbox[0] = max(bbox[0], 0)
bbox[1] = max(bbox[1], 0)
bbox[2] = min(bbox[2], inWidth)
bbox[3] = min(bbox[3], inHeight)
# Make sure the bounding box didn't become negative width/height
bbox[0] = min(bbox[0], bbox[2])
bbox[1] = min(bbox[1], bbox[3])
# If absolutely no suppression is requested under any
# circumstances, then force the bbox to be the entire image
else:
bbox = numpy.array([0, 0, inWidth, inHeight], dtype=numpy.int32)
# Check in case bbox is non-existent or mal-formed
if bbox[0] < 0 or bbox[1] < 0 or bbox[2] <= bbox[0] or bbox[3] <= bbox[1]:
print "WARNING: empty or malformed bounding box:", bbox
# Fix bbox so that it is a null box but at least not malformed
if bbox[0] < 0:
bbox[0] = 0
if bbox[1] < 0:
bbox[1] = 0
if bbox[2] < bbox[0]:
bbox[2] = bbox[0]
if bbox[3] < bbox[1]:
bbox[3] = bbox[1]
return bbox
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _logDebugBuffers(self, outputVector, scaleIndex, outPrefix="debug"):
"""
Dump detailed debugging information to disk (specifically, the
state of internal working buffers used by C implementaiton.
@param outPrefix -- Prefix to prepend to standard names
for debugging images.
"""
# Save input buffer
self._saveImage(self._bufferSetIn[scaleIndex],
"%s.buffer.in.%02d.png" % (outPrefix, scaleIndex))
# Save output buffer planes
for k in xrange(self._bufferSetOut[scaleIndex].shape[0]):
# We do integer arithmetic shifted by 12 bits
buf = (self._bufferSetOut[scaleIndex][k] / 4096).clip(min=0, max=255);
self._saveImage(buf, "%s.buffer.out.%02d.%02d.png" % (outPrefix, scaleIndex, k))
# Save raw gabor output images (from C implementation)
for k in xrange(self._numPlanes):
self._saveImage(outputVector[k], "%s.out.%02d.%02d.png" % \
(outPrefix, scaleIndex, k))
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _saveImage(self, imgArray, outPath):
imgDims = imgArray.shape
img = Image.new('L', (imgDims[1], imgDims[0]))
if imgArray.dtype == numpy.float32:
img.putdata( ((254.9 * imgArray.flatten()).clip(min=0.0, max=255.0)).astype(numpy.uint8) )
#img.putdata((255.0 * imgArray.flatten()).astype(numpy.uint8))
elif imgArray.dtype == numpy.int32:
img.putdata((imgArray.flatten()).astype(numpy.uint8))
else:
assert imgArray.dtype == numpy.uint8
img.putdata(imgArray.flatten())
img.save(outPath)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _doGabor(self, inputVector,
bbox,
imageBox,
outputVector,
scaleIndex,
offImagePixelValue=None,
validAlpha=None):
"""
Prepare arguments and invoke C function for
performing actual 2D convolution, rectification,
normalization, and post-processing.
"""
if offImagePixelValue is None:
assert type(offImagePixelValue) in [type(0), type(0.0)]
offImagePixelValue = self._offImagePixelValue
# If we actually have a valid validAlpha mask,
# then reshape it to the input image size
if validAlpha is not None:
origAlphaShape = validAlpha.shape
validAlpha.shape = inputVector.shape
# Invoke C function
result = self._gaborComputeProc(
self._wrapArray(self._gaborBank),
self._wrapArray(inputVector),
self._wrapArray(validAlpha),
self._wrapArray(bbox),
self._wrapArray(imageBox),
self._wrapArray(outputVector),
ctypes.c_float(self._gainConstant),
self._mapParamFromPythonToC('boundaryMode'),
ctypes.c_float(offImagePixelValue),
self._mapParamFromPythonToC('phaseMode'),
self._mapParamFromPythonToC('normalizationMethod'),
self._mapParamFromPythonToC('perPlaneNormalization'),
self._mapParamFromPythonToC('perPhaseNormalization'),
self._mapParamFromPythonToC('postProcessingMethod'),
ctypes.c_float(self._postProcessingSlope),
ctypes.c_float(self._postProcessingCenter),
ctypes.c_float(self._postProcessingMin),
ctypes.c_float(self._postProcessingMax),
self._wrapArray(self._bufferSetIn[scaleIndex]),
self._wrapArray(self._bufferSetOut[scaleIndex]),
self._wrapArray(self._postProcLUT),
ctypes.c_float(self._postProcLutScalar),
)
if result < 0:
raise Exception("gaborCompute failed")
# If we actually have a valid validAlpha mask,
# then reshape it back to it's original shape
if validAlpha is not None:
validAlpha.shape = origAlphaShape
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _convertEnumValue(self, enumValue):
"""
Convert a Python integer object into a ctypes integer
that can be passed to a C function and seen as an
int on the C side.
"""
return ctypes.c_int(enumValue)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _mapParamFromPythonToC(self, paramName):
"""
Map Python object values to equivalent enumerated C values.
"""
# boundaryMode
if paramName == "boundaryMode":
if self._boundaryMode == 'constrained':
enumValue = 0
elif self._boundaryMode == 'sweepOff':
enumValue = 1
return self._convertEnumValue(enumValue)
# phaseMode
elif paramName == "phaseMode":
if self._phaseMode == 'single':
enumValue = 0
elif self._phaseMode == 'dual':
enumValue = 1
return self._convertEnumValue(enumValue)
# normalizationMethod
elif paramName == "normalizationMethod":
if self._normalizationMethod == 'fixed':
enumValue = 0
elif self._normalizationMethod == 'max':
enumValue = 1
elif self._normalizationMethod == 'mean':
enumValue = 2
#elif self._normalizationMethod == 'maxPower':
# enumValue = 3
#elif self._normalizationMethod == 'meanPower':
# enumValue = 4
return self._convertEnumValue(enumValue)
# perPlaneNormalization
elif paramName == "perPlaneNormalization":
if not self._perPlaneNormalization:
enumValue = 0
else:
enumValue = 1
return self._convertEnumValue(enumValue)
# perPhaseNormalization
elif paramName == "perPhaseNormalization":
if not self._perPhaseNormalization:
enumValue = 0
else:
enumValue = 1
return self._convertEnumValue(enumValue)
# postProcessingMethod
elif paramName == "postProcessingMethod":
if self._postProcessingMethod == 'raw':
enumValue = 0
elif self._postProcessingMethod == 'sigmoid':
enumValue = 1
elif self._postProcessingMethod == 'threshold':
enumValue = 2
return self._convertEnumValue(enumValue)
# Invalid parameter
else:
assert False
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Private helper methods
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getValidEdgeModes(self):
"""
Returns a list of the valid edge modes.
"""
return ['constrained', 'sweepOff']
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _serializeImage(self, image):
"""
Serialize a PIL image so that it can be transported through
the runtime engine.
"""
s = StringIO()
format = 'png'
if hasattr(image, 'format') and image.format:
format = image.format
try:
image.save(s, format=format)
except:
image.save(s, format='png')
return s.getvalue()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getResponseKey(self, preSuppression):
"""
Returns a key used to index the response image dict
(either 'raw' or 'final')
"""
if preSuppression:
return 'raw'
else:
return 'final'
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _genResponseImages(self, rawResponse, preSuppression, phase='bottomUp'):
    """
    Generate PIL images from the response array.
    @param preSuppression -- a boolean, which indicates whether to
        store the generated images using the key 'raw' (if True)
        or 'final' (if False) within the _responseImages member dict.
    @param phase -- 'bottomUp', 'topDown', or 'combined', depending on which
        phase of response image we're generating
    Generate a dict of dicts. The primary dict is keyed by response,
    which can be either 'all' or an integer between 0 and numOrients-1;
    the secondary dicts are keyed by scale, which can be either 'all'
    or an integer between 0 and numScales.
    """
    if phase not in ('bottomUp', 'topDown', 'combined'):
      raise RuntimeError, "phase must be either 'bottomUp', 'topDown', or 'combined'"
    # NOTE: Python 2 integer (floor) division -- assumes the flattened
    # response length is an exact multiple of _numPlanes.
    numLocns = len(rawResponse.flatten()) / self._numPlanes
    response = rawResponse.reshape(numLocns, self._numPlanes)
    #numScales = len(self._inputPyramidTopology)
    numScales = self._numScales
    imageSet = {}
    # Build all the single-orientation responses
    for responseIdx in xrange(self._numPlanes):
      responseSet = {}
      # Build all the scales
      for scaleIdx in xrange(numScales):
        responseSet[scaleIdx] = self._makeImage(response, scaleIdx, responseIdx)
      # Build the "all scale" list
      #responseSet['all'] = responseSet.values()
      imageSet[responseIdx] = responseSet
    # Build the composite responses (false-color merge of all planes
    # at each scale), stored under the special orientation key 'all'.
    responseSet = {}
    for scaleIdx in xrange(numScales):
      scaleSet = [imageSet[orientIdx][scaleIdx] for orientIdx in xrange(self._numPlanes)]
      responseSet[scaleIdx] = self._makeCompositeImage(scaleSet)
    imageSet['all'] = responseSet
    # Serialize all images (including the 'all' composites; .items() is a
    # list in Python 2, so mutating the dicts during iteration is safe).
    for orientIdx, orientResponses in imageSet.items():
      for scaleIdx, scaleResponse in orientResponses.items():
        imageSet[orientIdx][scaleIdx] = self._serializeImage(scaleResponse)
      imageSet[orientIdx]['all'] = imageSet[orientIdx].values()
    # Store the image set, creating the two-level dict lazily.
    if self._responseImages is None:
      self._responseImages = {self._getResponseKey(preSuppression): {}}
    self._responseImages[self._getResponseKey(preSuppression)][phase] = imageSet
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getNodeRangeByScale(self, whichScale):
"""
Returns a 2-tuple of node indices corresponding to the set of
nodes associated with the specified 'whichScale'.
"""
assert whichScale >= 0
#assert whichScale < len(self._outputPyramidTopology)
assert whichScale < self._numScales
startNodeIdx = 0
#for scaleIndex, outputTopo in enumerate(self._outputPyramidTopology):
for scaleIndex, outputDim in enumerate(self._outputDims):
#nCols, nRows = outputTopo['numNodes']
nRows, nCols = outputDim
stopNodeIdx = startNodeIdx + nCols * nRows
if scaleIndex == whichScale:
return (startNodeIdx, stopNodeIdx)
else:
startNodeIdx = stopNodeIdx
assert False
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _makeImage(self, response, whichScale, whichOrient, gain=1.0):
"""
Generate a single PIL image (using the raw response array) for a
particular scale and orientation.
"""
#nCols, nRows = self._outputPyramidTopology[whichScale]['numNodes']
nRows, nCols = self._outputDims[whichScale]
img = Image.new('L', (nCols, nRows))
startNodeIdx, stopNodeIdx = self._getNodeRangeByScale(whichScale)
img.putdata((gain * 255.0 * response[startNodeIdx:stopNodeIdx,
whichOrient]).clip(min=0.0, max=255.0).astype(numpy.uint8))
return img
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _makeCompositeImage(self, imageSet):
"""
Create a false color composite image of the individiual
orientation-specific gabor response images in 'imageSet'.
"""
# Generate the bands
numBands = 3
bands = [Image.new('L',imageSet[0].size)] * numBands
for k, img in enumerate(imageSet):
whichBand = k % numBands
bands[whichBand] = ImageChops.add(bands[whichBand], img)
# Make final composite for this scale
compositeImage = Image.merge(mode='RGB', bands=bands)
return compositeImage
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  # NOTE(review): dead code -- this whole branch is disabled by the
  # 'if False:' guard, so _getEffectiveOrients is never defined.
  # Kept for reference; consider deleting.
  if False:
    def _getEffectiveOrients(self):
      """
      Internal helper method that returns the number of "effective"
      orientations (which treats the dual phases responses as a
      single orientation.)
      """
      numEffectiveOrients = self._numPlanes
      if self._phaseMode == 'dual':
        numEffectiveOrients /= 2
      if self._centerSurround:
        numEffectiveOrients -= 1
      return numEffectiveOrients
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _buildGaborBank(self):
    """
    Build an array of Gabor filters. Also build a 1-D vector of
    filter bank indices that maps each output location to a particular
    (customized) bank of gabor filters.
    """
    # Make sure dimensions of our Gabor filters are odd
    assert self._filterDim % 2 == 1
    # Create mesh grid indices. The result will be a numpy array of
    # shape (2, filterDim, filterDim).
    # Then meshGrid[0] stores the row indices of the master grid,
    # and meshGrid[1] stores the column indices.
    # NOTE: Python 2 integer (floor) division; with filterDim odd this
    # yields a symmetric index range centered on zero.
    lowerIndex = -(self._filterDim / 2)
    upperIndex = 1 + self._filterDim / 2
    meshGrid = numpy.mgrid[lowerIndex:upperIndex, lowerIndex:upperIndex]
    # If we are supposed to produce only center-surround output
    # (no oriented responses), then we will still go through the
    # process of making a minimalist bank of 2 oriented gabor
    # filters since that is needed by the center-surround filter
    # generation code
    numOrientations = self._numOrientations
    if numOrientations == 0:
      numOrientations = 2
    # Select the orientation sample points (in radians)
    radianInterval = numpy.pi / float(numOrientations)
    orientations = numpy.array(range(numOrientations), dtype=RealNumpyDType) * \
                   radianInterval
    # Compute trigonometric functions of orientation
    sinTheta = numpy.sin(orientations).reshape(numOrientations, 1, 1)
    cosTheta = numpy.cos(orientations).reshape(numOrientations, 1, 1)
    # Construct two filterDim X filterDim arrays containing y (row) and
    # x (column) coordinates (in dimensions of pixels), respectively.
    y = meshGrid[0].reshape(1, self._filterDim, self._filterDim)
    x = meshGrid[1].reshape(1, self._filterDim, self._filterDim)
    # Rotate the coordinate frame per orientation (broadcasts to shape
    # (numOrientations, filterDim, filterDim)).
    X = x * cosTheta - y * sinTheta
    Y = x * sinTheta + y * cosTheta
    # Build the Gabor filters
    #if hasattr(self, '_phase') and self._phase == 'edge':
    if self._targetType == 'edge':
      sinusoidalTerm = numpy.sin(2.0 * numpy.pi / self._wavelength * X)
    else:
      sinusoidalTerm = numpy.cos(2.0 * numpy.pi / self._wavelength * X)
    numerator = (X * X + self._aspectRatio * self._aspectRatio * Y * Y)
    denominator = -2.0 * self._effectiveWidth * self._effectiveWidth
    exponentialTerm = numpy.exp(numerator / denominator)
    gaborBank = sinusoidalTerm * exponentialTerm
    # Add center-surround filters, if requsted
    if self._centerSurround:
      expFilter = exponentialTerm[0] * exponentialTerm[numOrientations/2]
      # Cubing the raw exponential component seems to give a nice
      # center-surround filter
      centerSurround = expFilter * expFilter * expFilter
      # If our center-surround filter is in addition to the oriented
      # filter, then concatenate it to our filter bank; otherwise
      # it is the filter bank
      if self._numOrientations > 0:
        gaborBank = numpy.concatenate((gaborBank, centerSurround[numpy.newaxis,:,:]))
      else:
        gaborBank = centerSurround[numpy.newaxis,:,:]
    # Apply lobe suppression: Suppress the outer lobes of the sinusoidal
    # component of the Gabor filters so as to avoid "ringing" effects in
    # the Gabor response maps.
    #
    # We make a single lobe-suppression mask (which is directionally
    # oriented.) Then we rotate this mask by each orientation and
    # apply it to the pre-suppressed filter bank.
    # In order to minimize discontinuities in the gradients, the
    # suppression mask will be constructed as follows:
    #
    # y = 1 - |x|^p
    #
    # where:
    # y = Suppression (0 for total suppression, 1 for no-suppression)
    # x = position relative to center
    # p = Some exponent that controls the sharpness of suppression
    numGaborFilters = gaborBank.shape[0]
    # New lobe suppression.
    if self._lobeSuppression:
      # The orientation is always vertical, so we'll locate the discrete
      # filter cell where we go negative
      halfFilterDim = (self._filterDim - 1) / 2
      firstBadCell = None
      for cellIdx in xrange(halfFilterDim, self._filterDim):
        if gaborBank[0, 0, cellIdx] < 0.0:
          firstBadCell = cellIdx - halfFilterDim
          break
      if firstBadCell is not None:
        radialDist = numpy.abs(X / float(halfFilterDim))
        # Establish a radial distance threshold that is halfway
        # between the first discrete bad cell and the last good cell.
        if firstBadCell == halfFilterDim:
          distThresh = 0.5 * (radialDist[0, 0, halfFilterDim + firstBadCell] + \
                              radialDist[0, 0, halfFilterDim + firstBadCell - 1])
        else:
          assert firstBadCell < halfFilterDim
          # Establish a radial distance threshold that is halfway
          # between the first discrete bad cell and the second bad cell.
          # This seems to give good results in practice.
          distThresh = 0.5 * (radialDist[0, 0, halfFilterDim + firstBadCell] + \
                              radialDist[0, 0, halfFilterDim + firstBadCell + 1])
        suppressTerm = (radialDist < distThresh).astype(RealNumpyDType)
        # Center-surround filters are non-oriented: exempt them from the
        # directional suppression mask (all-ones plane).
        if self._centerSurround:
          suppressTerm = numpy.concatenate((suppressTerm,
                              numpy.ones((1, self._filterDim, self._filterDim),
                              dtype=RealNumpyDType)))
        gaborBank *= suppressTerm
    # Normalize so that mean of each filter is zero
    means = gaborBank.mean(axis=2).mean(axis=1).reshape(numGaborFilters, 1, 1)
    offsets = means.repeat(self._filterDim, axis=1).repeat(self._filterDim, axis=2)
    gaborBank -= offsets
    # Normalize so that sum of squares over each filter is one
    squareSums = (gaborBank * gaborBank).sum(axis=2).sum(axis=1).reshape(numGaborFilters, 1, 1)
    scalars = 1.0 / numpy.sqrt(squareSums)
    gaborBank *= scalars
    # Log gabor filters to disk
    if self._logPrefix:
      for k in xrange(numGaborFilters):
        img = Image.new('L', (self._filterDim, self._filterDim))
        minVal = gaborBank[k].min()
        gaborFilter = gaborBank[k] - minVal
        gaborFilter *= (254.99 / gaborFilter.max())
        img.putdata(gaborFilter.flatten().astype(numpy.uint8))
        img.save("%s.filter.%03d.png" % (self._logPrefix, k))
    # Store the Gabor Bank as a transposed set of 'numOrients' 1-D column-vectors
    # which can be easily dot-producted-ed against the split input vectors
    # during our compute() calls.
    # NOTE(review): scaled by 4096.0 and cast to int32 -- presumably a
    # 12-bit fixed-point representation for the integer C path; confirm
    # against the C library's expectations.
    self._gaborBank = (gaborBank.astype(numpy.float32) * 4096.0).astype(numpy.int32)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  @classmethod
  def getSpec(cls):
    """
    Return the node spec -- the declaration of this region's inputs,
    outputs and parameters -- as a dict for the runtime engine.
    """
    ns = Spec(description = cls.__doc__,
              singleNodeOnly=False)
    ns.inputs = dict(
      bottomUpIn=InputSpec(
        description="""The input signal, conceptually organized as an
                       image pyramid data structure, but internally
                       organized as a flattened vector.""",
        dataType='float',
        regionLevel=False,
        requireSplitterMap=False),
      validRegionIn=InputSpec(
        description="""A bounding box around the valid region of the image,
                       expressed in pixel coordinates; if the first element
                       of the bounding box is negative, then the valid
                       region is specified by 'validAlphaIn', in the form
                       of a non-rectangular alpha channel.""",
        dataType='float',
        regionLevel=True,
        requireSplitterMap=False),
      validAlphaIn=InputSpec(
        description="""An alpha channel that may be used (in place of the
                       'validRegionIn' bounding box) to specify the valid
                       region of the image on a per-pixel basis; the channel
                       should be an image of identical size to the finest
                       resolution data input image.""",
        dataType='float',
        regionLevel=True,
        requireSplitterMap=False)
    )
    ns.outputs = dict(
      bottomUpOut=OutputSpec(
        description="""The output signal, conceptually organized as an
                       image pyramid data structure, but internally
                       organized as a flattened vector.""",
        dataType='float',
        count=0,
        regionLevel=False,
        isDefaultOutput=True
        ),
      topDownOut=OutputSpec(
        description="""The feedback output signal, sent to the topDownIn
                       input of the next level down.""",
        dataType='float',
        count=0,
        regionLevel=True)
    )
    ns.parameters = dict(
      # -------------------------------------
      # Create/Read-only parameters
      filterDim=ParameterSpec(dataType='int', accessMode='Create',
        description="""
          The size (in pixels) of both the width and height of the
          gabor filters. Defaults to 9x9.
          """,
        defaultValue=9),
      numOrientations=ParameterSpec(dataType='int', accessMode='Create',
        description="""
          The number of gabor filter orientations to produce.
          The half-circle (180 degrees) of rotational angle will be evenly partitioned.
          Defaults to 4, which produces a gabor bank containing filters oriented
          at 0, 45, 90, and 135 degrees.
          """),
      phaseMode=ParameterSpec(dataType='str', accessMode='Create',
        description="""
          The number of separate phases to compute per orientation.
          Valid values are: 'single' or 'dual'. In 'single', responses to each such
          orientation are rectified by absolutizing them; i.e., a 90-degree edge
          will produce the same responses as a 270-degree edge, and the two
          responses will be indistinguishable. In "dual" mode, the responses to
          each orientation are rectified by clipping at zero, and then creating
          a second output response by inverting the raw response and again clipping
          at zero; i.e., a 90-degree edge will produce a response only in the
          90-degree-oriented plane, and a 270-degree edge will produce a response
          only the dual phase plane associated with the 90-degree plane (an
          implicit 270-degree plane.) Default is 'single'.
          """,
        constraints="enum: single, dual",
        defaultValue='single'),
      centerSurround=ParameterSpec(dataType='int', accessMode='Create',
        description="""
          Controls whether an additional filter corresponding to
          a non-oriented "center surround" response is applied to the image.
          If phaseMode is "dual", then a second "center surround" response plane
          is added as well (the inverted version of the center-surround response.)
          Defaults to False.
          """,
        defaultValue=0),
      targetType=ParameterSpec(dataType='str', accessMode='Create',
        description="""
          The preferred "target" of the gabor filters. A value of
          'line' specifies that line detectors (peaks in the center and troughs
          on either side) are to be used. A value of 'edge' specifies that edge
          detectors (with a peak on one side and a trough on the other) are to
          be used. Default is 'edge'.
          """,
        constraints="enum: line,edge",
        defaultValue='edge'),
      gainConstant=ParameterSpec(dataType='float', accessMode='ReadWrite',
        description="""
          A multiplicative amplifier that is applied to the gabor
          responses after any normalization. Defaults to 1.0; larger values
          increase the sensitivity to edges.
          """),
      normalizationMethod=ParameterSpec(dataType='str', accessMode='ReadWrite',
        description="""
          Controls the method by which responses are
          normalized on a per image (and per scale) basis. Accepts the following
          three legal values:
          "fixed": No response normalization;
          "max": Applies a global gain value to the responses so that the
          max response equals the value of 'gainConstant'
          "mean": Applies a global gain value to the responses so that the
          mean response equals the value of 'gainConstant'
          Default is 'fixed'.
          """,
        constraints="enum: fixed, mean, max"
        ),
      perPlaneNormalization=ParameterSpec(dataType='int', accessMode='ReadWrite',
        description="""
          Controls whether normalization (as specified by
          'normalizationMethod') is applied globally across all response planes
          (for a given scale), or individually to each response plane. Default
          is False. Note: this parameter is ignored if normalizationMethod is "fixed".
          """,
        ),
      perPhaseNormalization=ParameterSpec(dataType='int', accessMode='ReadWrite',
        description="""
          Controls whether normalization (as specified by
          'normalizationMethod') is applied globally across both phases for a
          particular response orientation and scale, or individually to each
          phase of the response. Default is True. Note: this parameter is
          ignored if normalizationMethod is "fixed".
          """,
        ),
      postProcessingMethod=ParameterSpec(dataType='str', accessMode='ReadWrite',
        description="""
          Controls what type of post-processing (if any)
          is to be performed on the normalized responses. Valid value are:
          "raw": No post-processing is performed; final output values are
          unmodified after normalization
          "sigmoid": Passes normalized output values through a sigmoid function
          parameterized by 'postProcessingSlope' and 'postProcessingCenter'.
          "threshold": Passes normalized output values through a piecewise linear
          thresholding function parameterized by 'postProcessingMin'
          and 'postProcessingMax'.
          """,
        constraints="enum: raw, sigmoid, threshold"),
      postProcessingSlope=ParameterSpec(dataType='float', accessMode='ReadWrite',
        description="""
          Specifies the slope of the sigmoid function to apply if the
          post-processing mode is set to 'sigmoid'.
          """),
      postProcessingCenter=ParameterSpec(dataType='float', accessMode='ReadWrite',
        description="""
          Specifies the mid-point of the sigmoid function to apply if the
          post-processing mode is set to 'sigmoid'.
          """),
      postProcessingMin=ParameterSpec(dataType='float', accessMode='ReadWrite',
        description="""
          Specifies the value below which responses will be clipped to zero
          when post-processing mode is set to 'threshold'.
          """),
      postProcessingMax=ParameterSpec(dataType='float', accessMode='ReadWrite',
        description="""
          Specifies the value above which responses will be clipped to one
          when post-processing mode is set to 'threshold'.
          """),
      zeroThresholdOut=ParameterSpec(dataType='float', accessMode='ReadWrite',
        description="""
          If all outputs of a gabor node are below this threshold,
          they will all be driven to absolute 0. This is useful in conjunction with
          using the product mode/don't care spatial pooler which needs to know when
          an input should be treated as 0 vs being normalized to sum to 1.
          """),
      boundaryMode=ParameterSpec(dataType='str', accessMode='Create',
        description="""
          Controls how GaborNode deals with boundary effects. Accepts
          two valid parameters:
          'constrained' -- Gabor responses are normally only computed for image locations
          that are far enough from the edge of the input image so that the entire
          filter mask fits within the input image. Thus, the spatial dimensions of
          the output gabor maps will be smaller than the input image layers.
          'sweepOff' -- Gabor responses will be generated at every location within
          the input image layer. Thus, the spatial dimensions of the output gabor
          maps will be identical to the spatial dimensions of the input image.
          For input image locations that are near the edge (i.e., a portion of
          the gabor filter extends off the edge of the input image), the values
          of pixels that are off the edge of the image are taken to be as specifed
          by the parameter 'offImagePixelValue'.
          Default is 'constrained'.
          """,
        constraints='enum: constrained, sweepOff',
        defaultValue='constrained'),
      offImagePixelValue=ParameterSpec(dataType="str", accessMode='ReadWrite',
        description="""
          If 'boundaryMode' is set to 'sweepOff', then this
          parameter specifies the value of the input pixel to use for "filling"
          enough image locations outside the bounds of the original image.
          Ignored if 'boundaryMode' is 'constrained'. Default value is 0.
          """
        ),
      suppressOutsideBox=ParameterSpec(dataType='int', accessMode='ReadWrite',
        description="""
          If True, then gabor responses outside of the bounding
          box (provided from the sensor) are suppressed. Internally, the bounding
          box is actually expanded by half the filter dimension (respecting the edge
          of the image, of course) so that responses can be computed for all image
          locations within the original bounding box.
          """),
      forceBoxContraction=ParameterSpec(dataType='int', accessMode='ReadWrite',
        description="""
          Fine-tunes the behavior of bounding box suppression.
          If False (the default), then the bounding box will only be 'contracted'
          (by the half-width of the filter) in the dimenion(s) in which it is not
          the entire span of the image. If True, then the bounding box will be
          contracted unconditionally.
          """),
      suppressByAlpha=ParameterSpec(dataType='int', accessMode='ReadWrite',
        description="""
          A boolean that, if True, instructs GaborNode to use
          the pixel-accurate alpha mask received on the input 'validAlphaIn' for
          the purpose of suppression of responses.
          """),
      logPrefix=ParameterSpec(dataType='str', accessMode='ReadWrite',
        description="""
          If non-None, causes the response planes at each scale, and
          for each input image, to be written to disk using the specified prefix
          for the name of the log images. Default is None (no such logging.)
          """),
      maxTopDownOut=ParameterSpec(dataType='float', accessMode='Read', count=0,
        description="""
          The max top-down output from each node. It is faster to access this
          variable than to fetch the entire top-down output of every node. The
          top down image inspector fetches this parameter (if available)
          instead of the topDownOut output variable for better performance.
          """),
      # -------------------------------------
      # Undocumented parameters
      nta_aspectRatio=ParameterSpec(dataType='float', accessMode='Create',
        description="""
          Controls how "fat" (i.e., how oriented) the Gabor
          filters are. A value of 1 would produce completely non-oriented
          (circular) filters; smaller values will produce a more oriented
          filter. Default is 0.3.
          """,
        defaultValue=0.3),
      nta_effectiveWidth=ParameterSpec(dataType='float', accessMode='Create',
        description="""
          Controls the rate of exponential drop-off in
          the Gaussian component of the Gabor filter. Default is 4.5.
          """,
        defaultValue=4.5),
      nta_wavelength=ParameterSpec(dataType='float', accessMode='Create',
        description="""
          Controls the frequency of the sinusoidal component
          of the Gabor filter. Default is 5.6.
          """,
        defaultValue=5.6),
      nta_lobeSuppression=ParameterSpec(dataType='int', accessMode='Create',
        description="""
          Controls whether or not the secondary lobes of the
          Gabor filters are suppressed. The suppression is performed based
          on the radial distance from the oriented edge to which the Gabor
          filter is tuned. If True, then the secondary lobes produced
          by the pure mathematical Gabor equation will be suppressed
          and have no effect; if False, then the pure mathematical
          Gabor equation (digitized into discrete sampling points, of
          course) will be used. Default is True.
          """,
        defaultValue=1),
      nta_debugLogBuffers=ParameterSpec(dataType='int', accessMode='ReadWrite',
        description="""
          If enabled, causes internal memory buffers used
          C implementation to be dumped to disk after each compute()
          cycle as an aid in the debugging of the C code path.
          Defaults to False.
          """,
        ),
      nta_width=ParameterSpec(dataType="int", accessMode='Read',
        description="""Width of the maximum resolution."""),
      # NOTE(review): the description below says "Width" but this
      # parameter is the *height* of the maximum resolution -- looks
      # like a copy/paste error carried over from nta_width.
      nta_height=ParameterSpec(dataType="int", accessMode='Read',
        description="""Width of the maximum resolution."""),
      nta_morphologyMethod=ParameterSpec(dataType='str', accessMode='ReadWrite',
        description="""
          Controls the routines used to perform dilation and erosion of
          valid alpha masks. Legal values are:
          'opencv' -- use faster OpenCV routines;
          'nta' -- use the slower Numenta routines;
          'best' -- use OpenCV if it is available on the platform,
          otherwise use the slower routines.
          Default is 'best'.
          """),
      )
    return ns.toDict()
#---------------------------------------------------------------------------------
def getOutputElementCount(self, name):
"""This method will be called only when the node is used in nuPIC 2"""
if name == 'bottomUpOut':
return self.getNumPlanes()
elif name == 'topDownOut':
return 0
else:
raise Exception('Unknown output: ' + name)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Command line unit testing
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
if __name__=='__main__':
  # Minimal smoke test: construct a GaborNode2 region inside a nuPIC
  # network using a YAML-style parameter string.  No compute() is
  # exercised -- successful region construction is the whole test.
  from nupic.engine import Network
  n = Network()
  gabor = n.addRegion(
      'gabor',
      'py.GaborNode2',
      """{ filterDim: 5,
           numOrientations: 2,
           centerSurround: 1,
           phaseMode: single,
           targetType: edge,
           gainConstant: 1.0,
           normalizationMethod: max,
           postProcessingMethod: threshold,
           postProcessingMin: 0.15,
           postProcessingMax: 1.0,
           boundaryMode: sweepOff,
           #suppressOutsideBox: False,
           #suppressByAlpha: True,
           offImagePixelValue: colorKey,
           zeroThresholdOut: 0.003
       }""")
  # Python 2 print statement (this file predates Python 3).
  print 'Done.'
| gpl-3.0 |
igemsoftware/SYSU-Software2013 | project/Python27_32/Lib/stat.py | 179 | 1718 | """Constants/functions for interpreting results of os.stat() and os.lstat().
Suggested usage: from stat import *
"""
# Indices for stat struct members in the tuple returned by os.stat()
# (positional access into the 10-element stat result tuple).
ST_MODE = 0
ST_INO = 1
ST_DEV = 2
ST_NLINK = 3
ST_UID = 4
ST_GID = 5
ST_SIZE = 6
ST_ATIME = 7
ST_MTIME = 8
ST_CTIME = 9
# Extract bits from the mode
def S_IMODE(mode):
    """Return the portion of the file's mode that can be set by os.chmod()."""
    return mode & 0o7777
def S_IFMT(mode):
    """Return the portion of the file's mode that describes the file type."""
    return mode & 0o170000
# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)
# Values are Python 2 octal literals (e.g. 0040000 == 0o040000).
S_IFDIR = 0040000
S_IFCHR = 0020000
S_IFBLK = 0060000
S_IFREG = 0100000
S_IFIFO = 0010000
S_IFLNK = 0120000
S_IFSOCK = 0140000
# Functions to test for each file type
def S_ISDIR(mode):
    """Return True if mode is from a directory."""
    return (mode & 0o170000) == 0o040000
def S_ISCHR(mode):
    """Return True if mode is from a character special device file."""
    return (mode & 0o170000) == 0o020000
def S_ISBLK(mode):
    """Return True if mode is from a block special device file."""
    return (mode & 0o170000) == 0o060000
def S_ISREG(mode):
    """Return True if mode is from a regular file."""
    return (mode & 0o170000) == 0o100000
def S_ISFIFO(mode):
    """Return True if mode is from a FIFO (named pipe)."""
    return (mode & 0o170000) == 0o010000
def S_ISLNK(mode):
    """Return True if mode is from a symbolic link."""
    return (mode & 0o170000) == 0o120000
def S_ISSOCK(mode):
    """Return True if mode is from a socket."""
    return (mode & 0o170000) == 0o140000
# Names for permission bits
# (Python 2 octal literals; e.g. S_ISUID == 0o4000, the set-uid bit.)
S_ISUID = 04000
S_ISGID = 02000
S_ENFMT = S_ISGID
S_ISVTX = 01000
S_IREAD = 00400
S_IWRITE = 00200
S_IEXEC = 00100
S_IRWXU = 00700
S_IRUSR = 00400
S_IWUSR = 00200
S_IXUSR = 00100
S_IRWXG = 00070
S_IRGRP = 00040
S_IWGRP = 00020
S_IXGRP = 00010
S_IRWXO = 00007
S_IROTH = 00004
S_IWOTH = 00002
S_IXOTH = 00001
# Names for file flags
# (BSD chflags-style flag bits; presumably unused on platforms
# without chflags support.)
UF_NODUMP = 0x00000001
UF_IMMUTABLE = 0x00000002
UF_APPEND = 0x00000004
UF_OPAQUE = 0x00000008
UF_NOUNLINK = 0x00000010
SF_ARCHIVED = 0x00010000
SF_IMMUTABLE = 0x00020000
SF_APPEND = 0x00040000
SF_NOUNLINK = 0x00100000
SF_SNAPSHOT = 0x00200000
| mit |
ar7z1/ansible | test/units/modules/network/f5/test_bigip_trunk.py | 5 | 4616 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_trunk import ApiParameters
from library.modules.bigip_trunk import ModuleParameters
from library.modules.bigip_trunk import ModuleManager
from library.modules.bigip_trunk import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_trunk import ApiParameters
from ansible.modules.network.f5.bigip_trunk import ModuleParameters
from ansible.modules.network.f5.bigip_trunk import ModuleManager
from ansible.modules.network.f5.bigip_trunk import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Load fixture file *name*, parse it as JSON when possible, and cache it."""
    path = os.path.join(fixture_path, name)
    if path not in fixture_data:
        with open(path) as handle:
            raw = handle.read()
        try:
            parsed = json.loads(raw)
        except Exception:
            # Not valid JSON -- fall back to the raw file contents.
            parsed = raw
        fixture_data[path] = parsed
    return fixture_data[path]
class TestParameters(unittest.TestCase):
    # Verifies that ModuleParameters canonicalizes user input: the
    # interface list is sorted, and the human-facing hash name
    # 'destination-mac' maps to the REST API value 'dst-mac'.
    def test_module_parameters(self):
        args = dict(
            name='foo',
            interfaces=[
                '1.3', '1.1'
            ],
            link_selection_policy='auto',
            frame_distribution_hash='destination-mac',
            lacp_enabled=True,
            lacp_mode='active',
            lacp_timeout='long'
        )
        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        assert p.interfaces == ['1.1', '1.3']
        assert p.link_selection_policy == 'auto'
        assert p.frame_distribution_hash == 'dst-mac'
        assert p.lacp_enabled is True
        assert p.lacp_mode == 'active'
        assert p.lacp_timeout == 'long'
    # Verifies that ApiParameters reads a recorded REST response
    # (load_tm_net_trunk_1.json) back into typed attributes.
    def test_api_parameters(self):
        args = load_fixture('load_tm_net_trunk_1.json')
        p = ApiParameters(params=args)
        assert p.name == 'foo'
        assert p.frame_distribution_hash == 'dst-mac'
        assert p.lacp_enabled is False
        assert p.lacp_mode == 'active'
        assert p.lacp_timeout == 'long'
        assert p.interfaces == ['1.3']
        assert p.link_selection_policy == 'maximum-bandwidth'
@patch('ansible.module_utils.f5_utils.AnsibleF5Client._get_mgmt_root',
       return_value=True)
class TestManager(unittest.TestCase):
    def setUp(self):
        self.spec = ArgumentSpec()
    # End-to-end create path: device access is mocked out, so this only
    # exercises the module's own parameter handling and change reporting.
    def test_create(self, *args):
        set_module_args(dict(
            name='foo',
            interfaces=[
                '1.3', '1.1'
            ],
            link_selection_policy='auto',
            frame_distribution_hash='destination-mac',
            lacp_enabled=True,
            lacp_mode='active',
            lacp_timeout='long',
            server='localhost',
            password='password',
            user='admin'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        # Override methods to force specific logic in the module to happen
        # (trunk does not exist yet -> create_on_device must be taken).
        mm = ModuleManager(module=module)
        mm.create_on_device = Mock(return_value=True)
        mm.exists = Mock(return_value=False)
        results = mm.exec_module()
        assert results['changed'] is True
        assert results['link_selection_policy'] == 'auto'
        assert results['frame_distribution_hash'] == 'destination-mac'
        assert results['lacp_enabled'] is True
        assert results['lacp_mode'] == 'active'
        assert results['lacp_timeout'] == 'long'
| gpl-3.0 |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/boto/cloudsearch/optionstatus.py | 185 | 8711 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import time
from boto.compat import json
class OptionStatus(dict):
    """
    Presents a combination of status fields (defined below) which are
    accessed as attributes and option values which are stored in the
    native Python dictionary. In this class, the option values are
    merged from a JSON object that is stored as the Option part of
    the object.

    :ivar domain_name: The name of the domain this option is associated with.
    :ivar create_date: A timestamp for when this option was created.
    :ivar state: The state of processing a change to an option.
        Possible values:

        * RequiresIndexDocuments: the option's latest value will not
          be visible in searches until IndexDocuments has been called
          and indexing is complete.
        * Processing: the option's latest value is not yet visible in
          all searches but is in the process of being activated.
        * Active: the option's latest value is completely visible.

    :ivar update_date: A timestamp for when this option was updated.
    :ivar update_version: A unique integer that indicates when this
        option was last updated.
    """

    def __init__(self, domain, data=None, refresh_fn=None, save_fn=None):
        self.domain = domain
        self.refresh_fn = refresh_fn
        self.save_fn = save_fn
        self.refresh(data)

    def _update_status(self, status):
        self.creation_date = status['creation_date']
        # Bug fix: keep both ``status`` and ``state`` attributes in sync.
        # ``wait_for_state`` polls ``self.state``, which previously was
        # only assigned when parsing XML (endElement), so waiting after a
        # JSON refresh could loop forever on a stale value.
        self.status = status['state']
        self.state = status['state']
        self.update_date = status['update_date']
        self.update_version = int(status['update_version'])

    def _update_options(self, options):
        # ``options`` is a JSON document (string); merge its keys/values
        # into this dict.
        if options:
            self.update(json.loads(options))

    def refresh(self, data=None):
        """
        Refresh the local state of the object.  You can either pass
        new state data in as the parameter ``data`` or, if that parameter
        is omitted, the state data will be retrieved from CloudSearch.
        """
        if not data:
            if self.refresh_fn:
                data = self.refresh_fn(self.domain.name)
        if data:
            self._update_status(data['status'])
            self._update_options(data['options'])

    def to_json(self):
        """
        Return the JSON representation of the options as a string.
        """
        return json.dumps(self)

    def startElement(self, name, attrs, connection):
        # No nested XML elements need special handling.
        return None

    def endElement(self, name, value, connection):
        # NOTE(review): the XML path stores ``created``/``updated`` while
        # the JSON path stores ``creation_date``/``update_date`` -- kept
        # as-is for backward compatibility.
        if name == 'CreationDate':
            self.created = value
        elif name == 'State':
            self.state = value
        elif name == 'UpdateDate':
            self.updated = value
        elif name == 'UpdateVersion':
            self.update_version = int(value)
        elif name == 'Options':
            # Bug fix: this used to call the undefined method
            # ``update_from_json_doc`` (AttributeError at runtime);
            # ``_update_options`` is the JSON-merging helper intended.
            self._update_options(value)
        else:
            setattr(self, name, value)

    def save(self):
        """
        Write the current state of the local object back to the
        CloudSearch service.
        """
        if self.save_fn:
            data = self.save_fn(self.domain.name, self.to_json())
            self.refresh(data)

    def wait_for_state(self, state):
        """
        Performs polling of CloudSearch to wait for the ``state``
        of this object to change to the provided state.
        """
        while self.state != state:
            time.sleep(5)
            self.refresh()
class IndexFieldStatus(OptionStatus):
    """OptionStatus variant whose option payload arrives as a dict."""

    def _update_options(self, options):
        # Already a native mapping -- no JSON decoding, merge directly.
        self.update(options)

    def save(self):
        """Index field status is read-only; saving is a no-op."""
class RankExpressionStatus(IndexFieldStatus):
    # Rank expression options also arrive as a plain dict and are
    # read-only, so IndexFieldStatus behavior is reused unchanged.
    pass
class ServicePoliciesStatus(OptionStatus):
    """Access-policy document controlling which IPs may reach a domain's
    search and document services.  The policy itself is the dict content
    (an IAM-style document with a ``Statement`` list)."""

    def new_statement(self, arn, ip):
        """
        Returns a new policy statement that will allow
        access to the service described by ``arn`` by the
        ip specified in ``ip``.

        :type arn: string
        :param arn: The Amazon Resource Notation identifier for the
            service you wish to provide access to.  This would be
            either the search service or the document service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        return {
            "Effect":"Allow",
            "Action":"*", # Docs say use GET, but denies unless *
            "Resource": arn,
            "Condition": {
                "IpAddress": {
                    "aws:SourceIp": [ip]
                }
            }
        }

    def _allow_ip(self, arn, ip):
        # Create the policy document on first use; otherwise append the
        # ip to an existing IpAddress condition for this resource, or add
        # a whole new statement when none targets ``arn``.
        if 'Statement' not in self:
            s = self.new_statement(arn, ip)
            self['Statement'] = [s]
            self.save()
        else:
            add_statement = True
            for statement in self['Statement']:
                if statement['Resource'] == arn:
                    for condition_name in statement['Condition']:
                        if condition_name == 'IpAddress':
                            # Found an existing IpAddress condition for
                            # this resource; extend it (no duplicates).
                            add_statement = False
                            condition = statement['Condition'][condition_name]
                            if ip not in condition['aws:SourceIp']:
                                condition['aws:SourceIp'].append(ip)
            if add_statement:
                s = self.new_statement(arn, ip)
                self['Statement'].append(s)
            # Persist the updated policy back to CloudSearch.
            self.save()

    def allow_search_ip(self, ip):
        """
        Add the provided ip address or CIDR block to the list of
        allowable address for the search service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        arn = self.domain.search_service_arn
        self._allow_ip(arn, ip)

    def allow_doc_ip(self, ip):
        """
        Add the provided ip address or CIDR block to the list of
        allowable address for the document service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        arn = self.domain.doc_service_arn
        self._allow_ip(arn, ip)

    def _disallow_ip(self, arn, ip):
        # Remove ``ip`` from every IpAddress condition attached to the
        # resource; only save when something actually changed.
        if 'Statement' not in self:
            return
        need_update = False
        for statement in self['Statement']:
            if statement['Resource'] == arn:
                for condition_name in statement['Condition']:
                    if condition_name == 'IpAddress':
                        condition = statement['Condition'][condition_name]
                        if ip in condition['aws:SourceIp']:
                            condition['aws:SourceIp'].remove(ip)
                            need_update = True
        if need_update:
            self.save()

    def disallow_search_ip(self, ip):
        """
        Remove the provided ip address or CIDR block from the list of
        allowable address for the search service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        arn = self.domain.search_service_arn
        self._disallow_ip(arn, ip)

    def disallow_doc_ip(self, ip):
        """
        Remove the provided ip address or CIDR block from the list of
        allowable address for the document service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        arn = self.domain.doc_service_arn
        self._disallow_ip(arn, ip)
| agpl-3.0 |
tobeyrowe/smallchange | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen

# Source image and output animation path.
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'   # per-frame temp file pattern
NUMFRAMES=35             # rotation steps in one full revolution
FRAMERATE=10.0           # passed to convert as -delay
CONVERT='convert'        # ImageMagick binary (6.7+ needed for .mng)
CLOCKWISE=True
DSIZE=(16,16)            # final thumbnail size in pixels

im_src = Image.open(SRC)

# Mirror the source first so the negated rotation below spins clockwise.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)

def frame_to_filename(frame):
    # Temporary per-frame PNG path, e.g. /tmp/tmp-007.png.
    return path.join(TMPDIR, TMPNAME % frame)

# Render each frame: rotate, scale down, save to a temp file.
frame_files = []
for frame in xrange(NUMFRAMES):
    # Sample the rotation at the middle of each step.
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)

# Assemble the rendered frames into the .mng animation via ImageMagick.
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
davidmueller13/g3_kernel | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect build output into the directories chosen by the perf
    # Makefile (module-level build_lib / build_tmp, from the
    # PYTHON_EXTBUILD_* environment) instead of distutils' ./build.
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install straight out of the external build directory set up by the
    # perf Makefile.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Compiler flags, extended with whatever CFLAGS the build system passes.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Build output locations are dictated by the perf build system.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# One source file per non-blank, non-comment line of python-ext-sources.
# (``file()`` is the Python 2 builtin; this script targets python2.)
ext_sources = [f.strip() for f in file('util/python-ext-sources')
                if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                  sources = ext_sources,
                  include_dirs = ['util/include'],
                  extra_compile_args = cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
nricklin/PyGithub | github/tests/GistComment.py | 39 | 2819 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
import datetime
class GistComment(Framework.TestCase):
    """Recorded-interaction tests for reading, editing and deleting a
    gist comment."""
    def setUp(self):
        Framework.TestCase.setUp(self)
        # Fixture: comment 323629 on gist 2729810.
        self.comment = self.g.get_gist("2729810").get_comment(323629)

    def testAttributes(self):
        # Verify every attribute exposed by the comment object.
        self.assertEqual(self.comment.body, "Comment created by PyGithub")
        self.assertEqual(self.comment.created_at, datetime.datetime(2012, 5, 19, 7, 7, 57))
        self.assertEqual(self.comment.id, 323629)
        self.assertEqual(self.comment.updated_at, datetime.datetime(2012, 5, 19, 7, 7, 57))
        self.assertEqual(self.comment.url, "https://api.github.com/gists/2729810/comments/323629")
        self.assertEqual(self.comment.user.login, "jacquev6")

    def testEdit(self):
        self.comment.edit("Comment edited by PyGithub")
        # Editing refreshes both the body and the updated_at timestamp.
        self.assertEqual(self.comment.body, "Comment edited by PyGithub")
        self.assertEqual(self.comment.updated_at, datetime.datetime(2012, 5, 19, 7, 12, 32))

    def testDelete(self):
        # Success is simply the call not raising.
        self.comment.delete()
| gpl-3.0 |
elshize/qutebrowser | tests/unit/mainwindow/statusbar/test_backforward.py | 2 | 3211 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2017-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Test Backforward widget."""
import pytest
from qutebrowser.mainwindow.statusbar import backforward
@pytest.fixture
def backforward_widget(qtbot):
    """Provide a Backforward widget registered with qtbot for cleanup."""
    widget = backforward.Backforward()
    qtbot.add_widget(widget)
    return widget
@pytest.mark.parametrize('can_go_back, can_go_forward, expected_text', [
    (False, False, ''),
    (True, False, '[<]'),
    (False, True, '[>]'),
    (True, True, '[<>]'),
])
def test_backforward_widget(backforward_widget, tabbed_browser_stubs,
                            fake_web_tab, can_go_back, can_go_forward,
                            expected_text):
    """Ensure the Backforward widget shows the correct text."""
    tab = fake_web_tab(can_go_back=can_go_back, can_go_forward=can_go_forward)
    tabbed_browser = tabbed_browser_stubs[0]
    tabbed_browser.current_index = 1
    tabbed_browser.tabs = [tab]
    backforward_widget.enabled = True
    backforward_widget.on_tab_cur_url_changed(tabbed_browser)
    # Text and visibility must agree: empty text means hidden.
    assert backforward_widget.text() == expected_text
    assert backforward_widget.isVisible() == bool(expected_text)

    # Check that the widget stays hidden if not in the statusbar
    backforward_widget.enabled = False
    backforward_widget.hide()
    backforward_widget.on_tab_cur_url_changed(tabbed_browser)
    assert backforward_widget.isHidden()

    # Check that the widget gets reset if empty.
    if can_go_back and can_go_forward:
        tab = fake_web_tab(can_go_back=False, can_go_forward=False)
        tabbed_browser.tabs = [tab]
        backforward_widget.enabled = True
        backforward_widget.on_tab_cur_url_changed(tabbed_browser)
        assert backforward_widget.text() == ''
        assert not backforward_widget.isVisible()
def test_none_tab(backforward_widget, tabbed_browser_stubs, fake_web_tab):
    """Make sure nothing crashes when passing None as tab."""
    tab = fake_web_tab(can_go_back=True, can_go_forward=True)
    tabbed_browser = tabbed_browser_stubs[0]
    tabbed_browser.current_index = 1
    tabbed_browser.tabs = [tab]
    backforward_widget.enabled = True
    backforward_widget.on_tab_cur_url_changed(tabbed_browser)

    assert backforward_widget.text() == '[<>]'
    assert backforward_widget.isVisible()

    # Index -1 presumably makes the stub report no current tab (None);
    # the widget must then clear and hide itself.
    tabbed_browser.current_index = -1
    backforward_widget.on_tab_cur_url_changed(tabbed_browser)

    assert backforward_widget.text() == ''
    assert not backforward_widget.isVisible()
| gpl-3.0 |
BBN-Q/Quince | quince/param.py | 1 | 16560 | # coding: utf-8
# Raytheon BBN Technologies 2016
# Contributiors: Graham Rowlands
#
# This file contains the parameter descriptions
from qtpy.QtGui import *
from qtpy.QtCore import *
from qtpy.QtWidgets import *
import os
class Parameter(QGraphicsEllipseItem):
    """Base class for a node parameter: a small input-connector dot plus
    a text label.  Subclasses attach a concrete ``value_box`` widget that
    stores and edits the actual value."""
    def __init__(self, name, parent=None):
        self.name = name
        self.parent = parent
        rad = 5
        super(Parameter, self).__init__(-rad, -rad, 2*rad, 2*rad, parent=parent)
        self.has_input = True # Do we draw the connector?
        self.interactive = True # Can we modify the value?
        self.setBrush(QBrush(QColor(200,200,240)))
        self.setPen(Qt.black)
        self.setZValue(1)

        self.height = 36
        self.height_collapsed = 15

        # Wiring state: one temporary wire while dragging, plus permanent
        # incoming/outgoing connections.
        self.temp_wire = None
        self.wires_in = []
        self.wires_out = []

        # Text label and area
        self.label = QGraphicsTextItem(self.name, parent=self)
        self.label.setDefaultTextColor(Qt.black)
        self.label.setPos(5,-10)

        # Value Box -- created by the subclass constructors.
        self.value_box = None

    def set_changed_flag(self):
        # Would prefer to use signals/slots, but that's apparently too heavy for QGraphics
        # Instead we add the name of the changed parameter to the list
        if self.parent is not None and not self.parent.changing:
            self.parent.changing = True
            self.parent.value_changed( self.name )

    def set_interactive(self, value):
        # Toggle whether the user may edit this parameter's value.
        self.interactive = value
        self.value_box.interactive = value

    def set_collapsed(self, collapsed):
        # Hide the value box while the owning node is collapsed.
        self.collapsed = collapsed
        self.value_box.setVisible(not self.collapsed)

    def width(self):
        return self.label.boundingRect().topRight().x()

    def set_box_width(self, width):
        self.value_box.set_box_width(width)

    def value(self):
        return self.value_box.value()

    def set_value(self, value):
        self.value_box.set_value(value)
        self.set_changed_flag()

    def paint(self, painter, options, widget):
        # Only draw the connector dot when the parameter accepts input.
        if self.has_input:
            super(Parameter, self).paint(painter, options, widget)
class NumericalParameter(Parameter):
    """Parameter holding an int or float, edited via a SliderBox."""
    def __init__(self, name, datatype, min_value, max_value,
                 increment, snap, parent=None):
        super(NumericalParameter, self).__init__(name, parent=parent)
        self.datatype = datatype
        self.value_box = SliderBox(
            datatype, min_value, max_value, increment, snap,
            parent=self)

    def set_value(self, value):
        # Coerce to the declared datatype before handing to the slider.
        self.value_box.set_value(self.datatype(value))
        self.set_changed_flag()
class StringParameter(Parameter):
    """Parameter holding a free-form text value in a StringBox."""
    def __init__(self, name, parent=None):
        super(StringParameter, self).__init__(name, parent=parent)
        self.value_box = StringBox(parent=self)
        self.parent = parent

    def set_value(self, value):
        # Unlike Parameter.set_value this does not call set_changed_flag
        # itself -- StringBox.set_value notifies the parent on its own.
        self.value_box.set_value(value)
class ComboParameter(StringParameter):
    """Parameter whose value is chosen from a fixed list via a popup menu."""
    def __init__(self, name, values, parent=None):
        super(ComboParameter, self).__init__(name, parent=parent)
        # Detach the StringBox created by the parent constructor and
        # replace it with a menu-driven ComboBox.
        self.value_box.setParentItem(None)
        self.value_box = ComboBox(values, parent=self)

    # NOTE(review): identical to Parameter.set_collapsed; looks like a
    # redundant override -- confirm before removing.
    def set_collapsed(self, collapsed):
        self.collapsed = collapsed
        self.value_box.setVisible(not self.collapsed)
class BooleanParameter(Parameter):
    """Parameter rendered as a checkbox; occupies a single row whether
    collapsed or expanded."""
    def __init__(self, name, parent=None):
        super(BooleanParameter, self).__init__(name, parent=parent)
        self.value_box = CheckBox(parent=self)
        self.height = 15
        self.height_collapsed = 15

    def width(self):
        # Reserve 18px of extra room for the checkbox right of the label.
        return self.label.boundingRect().topRight().x() + 18
class FilenameParameter(StringParameter):
    """String parameter with a browse button for picking a file path."""
    def __init__(self, name, parent=None):
        super(FilenameParameter, self).__init__(name, parent=parent)
        # Swap the plain StringBox for one that carries a browse button.
        self.value_box.setParentItem(None)
        self.value_box = FilenameBox(parent=self)

    def width(self):
        # Extra room for the browse button.
        return self.label.boundingRect().topRight().x() + 20
class SliderBox(QGraphicsRectItem):
    """Horizontal drag-to-change value widget for numerical parameters.

    Dragging left/right changes the value in ``increment`` steps; a
    plain click (no drag) switches the label into inline text editing.
    """
    def __init__(self, datatype, min_value, max_value, increment, snap, parent=None):
        super(SliderBox, self).__init__(parent=parent)
        self.parent = parent
        self.dragging = False
        self.value_changed = False
        self.interactive = True

        self.datatype = datatype
        self.min_value = min_value
        self.max_value = max_value
        self.increment = increment
        self.snap = snap
        self._value = min_value

        self.height = 14
        self.rect_radius = 7.0
        self.control_distance = 0.55228*self.rect_radius
        self.setRect(3,15,94,self.height)

        # Centered text label showing the current value.
        self.label = ValueBoxText(self.textFromValue(self._value), parent=self)
        label_width = self.label.boundingRect().topRight().x()
        self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5)

    def paint(self, painter, options, widget):
        # Background object is a rounded rectangle
        linear_gradient = QLinearGradient(self.rect().topLeft(), self.rect().bottomLeft())
        linear_gradient.setColorAt(0, QColor(150,150,150))
        linear_gradient.setColorAt(1, QColor(200,200,200))
        # NOTE(review): this references QPainter.RenderHint rather than
        # calling painter.setRenderHint; left unchanged to preserve the
        # existing rendering behavior -- confirm intent.
        painter.RenderHint(QPainter.Antialiasing)
        painter.setBrush(QBrush(linear_gradient))
        painter.setPen(QPen(QColor(200,200,200), 0.75))
        painter.drawRoundedRect(self.rect(), self.rect_radius, self.rect_radius)

        # Draw the fill bar using a round capped line, proportional to the
        # value's position inside [min_value, max_value].
        linear_gradient = QLinearGradient(self.rect().topLeft(), self.rect().topRight())
        linear_gradient.setColorAt(0, QColor(180,180,220))
        linear_gradient.setColorAt(1, QColor(80,80,100))
        painter.setPen(QPen(QBrush(linear_gradient), 0.9*self.height, Qt.SolidLine, Qt.RoundCap))
        path = QPainterPath()
        path.moveTo(3+self.rect_radius, 15 + 0.5*self.height)
        fill_size = (self.rect().width()-2*self.rect_radius)*(self._value-self.min_value)/(self.max_value-self.min_value)
        # Bug fix: the y coordinate was "7.5 + 0.5+self.height", which only
        # matched the moveTo y (15 + 0.5*self.height) by coincidence for
        # height == 14.  Use the intended expression so the bar stays
        # horizontal if the height ever changes.
        path.lineTo(3+self.rect_radius+fill_size, 15 + 0.5*self.height)
        painter.drawPath(path)

        # Draw the highlight line similarly
        linear_gradient = QLinearGradient(self.rect().topLeft(), self.rect().bottomLeft())
        linear_gradient.setColorAt(0, QColor(240,240,240,150))
        linear_gradient.setColorAt(0.3, QColor(240,240,240,00))
        painter.setPen(QPen(QBrush(linear_gradient), 0.9*self.height, Qt.SolidLine, Qt.RoundCap))
        path = QPainterPath()
        path.moveTo(3+self.rect_radius, 15.0 + 0.5*self.height)
        path.lineTo(3+self.rect_radius+fill_size, 15.0 + 0.5*self.height)
        painter.drawPath(path)

    def valueFromText(self, text):
        """Parse ``text`` into this box's datatype.  On parse failure the
        user is notified via the status bar and the current value kept."""
        try:
            if self.datatype is int:
                val = int(str(text))
            else:
                val = float(str(text))
            return val
        except (ValueError, TypeError):
            # Narrowed from a bare except: only parse failures should be
            # swallowed here.
            self.scene().window.set_status("Got unreasonable input...")
            return self._value

    def textFromValue(self, value):
        """Format ``value`` for display (plain for ints, 4 sig figs for floats)."""
        if self.datatype is int:
            return ("{:d}".format(value))
        else:
            return ("{:.4g}".format(value))

    def set_value(self, val):
        """Set the value, clamping to [min_value, max_value], and notify
        the owning parameter when anything changed."""
        changed = False
        val = self.valueFromText(val)
        if val >= self.min_value and val <= self.max_value:
            if self.snap:
                # NOTE(review): for float values (val/snap)*snap is an
                # identity; rounding may have been intended -- confirm
                # before changing.
                val = (val/self.snap)*self.snap
            self._value = self.datatype(val)
            changed = True
        elif val < self.min_value:
            self._value = self.datatype(self.min_value)
            changed = True
        else:
            self._value = self.datatype(self.max_value)
            changed = True

        self.label.full_text = self.textFromValue(self._value)
        self.label.setPlainText(self.textFromValue(self._value))
        self.refresh_label()
        self.update()
        if changed:
            self.parent.set_changed_flag()

    def refresh_label(self):
        # Re-center the label horizontally inside the box.
        label_width = self.label.boundingRect().topRight().x()
        self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5)
        self.update()

    def value(self):
        return self._value

    def set_box_width(self, width):
        self.setRect(3,15, width-6, self.height)
        label_width = self.label.boundingRect().topRight().x()
        self.label.clip_text()
        self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5)

    def mousePressEvent(self, event):
        if self.interactive:
            # Begin a (potential) drag; remember where we started so the
            # value delta can be computed incrementally.
            self.dragging = True
            self.original_value = self._value
            self.drag_start = event.scenePos()
        else:
            # Bug fix: previously forwarded to mouseMoveEvent.
            super(SliderBox, self).mousePressEvent(event)

    def mouseMoveEvent(self, event):
        if self.interactive:
            if self.dragging:
                # Every 10px of horizontal travel is one increment step.
                delta = event.scenePos() - self.drag_start
                value_change = self.increment*int(delta.x()/10.0)
                if value_change != 0.0:
                    self.value_changed = True
                    self.set_value(self.original_value + value_change)
        else:
            super(SliderBox, self).mouseMoveEvent(event)

    def mouseReleaseEvent(self, event):
        if self.interactive:
            self.dragging = False
            if not self.value_changed:
                # A click without any drag opens inline text editing.
                self.label.setPos(3+5,15-5)
                self.label.set_text_interaction(True)
            self.value_changed = False
        else:
            # Bug fix: previously forwarded to mouseMoveEvent.
            super(SliderBox, self).mouseReleaseEvent(event)
class StringBox(QGraphicsRectItem):
    """Rounded rectangle showing an editable text value; clicking it
    switches the label into inline editing."""
    def __init__(self, parent=None):
        super(StringBox, self).__init__(parent=parent)
        self.clicked = False
        self._value = ""

        self.height = 14
        self.rect_radius = 7.0
        self.control_distance = 0.55228*self.rect_radius
        self.setRect(3,15,94,self.height)

        self.label = ValueBoxText(self._value, parent=self)
        label_width = self.label.boundingRect().topRight().x()
        self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5)

    def paint(self, painter, options, widget):
        # Background object is a rounded rectangle
        linear_gradient = QLinearGradient(self.rect().topLeft(), self.rect().bottomLeft())
        linear_gradient.setColorAt(0, QColor(150,150,150))
        linear_gradient.setColorAt(1, QColor(200,200,200))
        painter.RenderHint(QPainter.Antialiasing)
        painter.setBrush(QBrush(linear_gradient))
        painter.setPen(QPen(QColor(200,200,200), 0.75))
        painter.drawRoundedRect(self.rect(), self.rect_radius, self.rect_radius)

    def set_value(self, value):
        # Keep both the authoritative text and the (possibly clipped)
        # displayed text, then notify the owning parameter if attached.
        self._value = value
        self.label.full_text = value
        self.label.setPlainText(value)
        self.label.clip_text()
        self.refresh_label()
        self.update()
        if hasattr(self, 'parent'):
            self.parent.set_changed_flag()

    def refresh_label(self):
        # Re-center the label horizontally inside the box.
        label_width = self.label.boundingRect().topRight().x()
        self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5)
        self.update()

    def value(self):
        return self._value

    def set_box_width(self, width):
        self.setRect(3,15, width-6, self.height)
        self.label.clip_text()
        self.refresh_label()

    def mousePressEvent(self, event):
        self.clicked = True

    def mouseReleaseEvent(self, event):
        # A full click (press followed by release) enters edit mode.
        if self.clicked:
            self.label.setPos(3+5,15-5)
            self.label.set_text_interaction(True)
            self.clicked = False
class FilenameBox(StringBox):
    """StringBox with a small browse button that opens a save dialog."""
    def __init__(self, parent=None):
        super(FilenameBox, self).__init__(parent=parent)
        # Browse button hugs the right edge; pressing it opens the dialog.
        self.browse_button = QGraphicsRectItem(self.rect().width()-16, -3, 15, 12, parent=self)
        self.browse_button.setBrush(QBrush(QColor(220,220,220)))
        self.browse_button.mousePressEvent = lambda e: self.save_file()
        # self.browse_button.mouseReleaseEvent = lambda e: self.save_file()

    def save_file(self):
        # Open the dialog rooted at this source file's directory and
        # store the chosen path as the value.
        path = os.path.dirname(os.path.realpath(__file__))
        fn = QFileDialog.getSaveFileName(None, 'Save Results As', path)
        self.set_value(fn[0])
        self.label.clip_text()
        self.refresh_label()

    def refresh_label(self):
        label_width = self.label.boundingRect().topRight().x()
        self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5)
        # Keep the browse button glued to the right edge of the box.
        self.browse_button.setRect(self.rect().width()-16, -3, 15, 12)
        self.update()
class ComboBox(StringBox):
    """StringBox whose value is picked from a popup menu of choices."""
    def __init__(self, values, parent=None):
        super(ComboBox, self).__init__(parent=parent)
        self.values = values

    def menu_changed(self, action):
        # Menu actions carry the chosen value in their data payload.
        self.set_value(action.data())

    def mousePressEvent(self, event):
        self.clicked = True

    def mouseReleaseEvent(self, event):
        # A full click pops up the menu of allowed values.
        if self.clicked:
            menu = QMenu()
            for v in self.values:
                act = QAction(v, self.scene())
                act.setData(v)
                menu.addAction(act)
            menu.triggered.connect(self.menu_changed)
            menu.exec_(event.screenPos())
            self.clicked = False
class CheckBox(QGraphicsRectItem):
    """Small clickable square representing a boolean value; the fill
    brush reflects the checked state."""
    def __init__(self, parent=None):
        super(CheckBox, self).__init__(parent=parent)
        self.parent = parent
        self.setRect(self.rect().width()-17, -3, 13, 13)
        self.unchecked_brush = QBrush(QColor(220,220,220))
        self.checked_brush = QBrush(QColor(40,40,40))
        self.setBrush(self.unchecked_brush)
        self._value = False
        self.clicked = False

    def set_box_width(self, width):
        # Pin the box to the right-hand edge of the parameter.
        self.setRect(width-17, -3, 13, 13)

    def value(self):
        return self._value

    def set_value(self, value):
        self._value = value
        if self._value:
            self.setBrush(self.checked_brush)
        else:
            self.setBrush(self.unchecked_brush)

    def mousePressEvent(self, event):
        self.clicked = True

    def mouseReleaseEvent(self, event):
        # A full click toggles the boolean state.
        if self.clicked:
            self.set_value(not self._value)
            self.clicked = False
class ValueBoxText(QGraphicsTextItem):
    """Text item inside a value box supporting inline editing.

    Keeps the authoritative string in ``full_text`` and shows a clipped
    version (with an ellipsis) when the box is too narrow.
    """
    def __init__(self, string, parent=None):
        super(ValueBoxText, self).__init__(string, parent=parent)
        self.setTextInteractionFlags(Qt.NoTextInteraction)
        self.ItemIsFocusable = True
        self.parent = parent
        self.full_text = string
        self.clip_text()

    def set_text_interaction(self, value):
        # Entering edit mode restores the full text and selects it all;
        # leaving edit mode clears the selection and focus.
        if value and (self.textInteractionFlags() == Qt.NoTextInteraction):
            self.setTextInteractionFlags(Qt.TextEditorInteraction)
            self.setPlainText(self.full_text)
            self.setFocus(Qt.MouseFocusReason)
            self.setSelected(True)
            c = self.textCursor()
            c.select(QTextCursor.Document)
            self.setTextCursor(c)
        elif not value and (self.textInteractionFlags() == Qt.TextEditorInteraction):
            self.setTextInteractionFlags(Qt.NoTextInteraction)
            c = self.textCursor()
            c.clearSelection()
            self.setTextCursor(c)
            self.clearFocus()

    def clip_text(self):
        # Truncate the displayed text to fit the parent box (~7px/char).
        if self.parent.rect().width() < self.boundingRect().topRight().x():
            clipped = self.full_text[:int(self.parent.rect().width()/7)-3]
            # NOTE(review): this test divides by 6 while the slice above
            # divides by 7, so the no-ellipsis branch rarely triggers --
            # confirm which factor is intended.
            if int(self.parent.rect().width()/6)-3 == len(self.full_text)-1:
                self.setPlainText(clipped)
            else:
                self.setPlainText(clipped+"...")

    def focusOutEvent(self, event):
        # Commit the edited text when focus leaves the editor.
        self.full_text = self.toPlainText()
        self.set_text_interaction(False)
        self.parent.set_value(self.full_text)
        self.clip_text()
        self.parent.refresh_label()
        return super(ValueBoxText, self).focusOutEvent(event)

    def keyPressEvent(self, event):
        # Return/Enter commits immediately; other keys are normal input.
        if event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter:
            self.full_text = self.toPlainText()
            self.set_text_interaction(False)
            self.parent.set_value(self.full_text)
            self.clip_text()
            self.parent.refresh_label()
        else:
            return super(ValueBoxText, self).keyPressEvent(event)
| apache-2.0 |
charleso/git-cc | git_cc/gitcc.py | 1 | 2009 | #!/usr/bin/env python
import inspect
import sys
from optparse import OptionParser
from . import checkin
from . import init
from . import rebase
from . import reset
from . import sync
from . import tag
from . import update
from . import version
commands = [
init, rebase, checkin, sync, reset, tag, update, version
]
def main():
    """Dispatch the first command-line argument to its command module."""
    cli_args = sys.argv[1:]
    if cli_args:
        for command in commands:
            if get_module_name(command) == cli_args[0]:
                return invoke(command, cli_args)
    # No arguments, or no command matched: show the usage summary.
    usage()
def invoke(cmd, args):
    """Parse command-line arguments for *cmd* and call its ``main``.

    Parameters of ``cmd.main`` that have default values are exposed as
    ``--long-options`` (booleans become flags); the remaining leading
    parameters are filled from the positional arguments.  Help strings
    come from ``cmd.ARGS``.
    """
    # Bug fix: inspect.getargspec was deprecated for years and removed in
    # Python 3.11; getfullargspec is the drop-in replacement.
    spec = inspect.getfullargspec(cmd.main)
    defaults = spec.defaults if spec.defaults else []
    # Parameters without defaults are positional; the rest are options.
    diff = len(spec.args) - len(defaults)
    _args = spec.args[diff:]
    parser = OptionParser(description=cmd.__doc__)
    for (name, default) in zip(_args, defaults):
        option = {
            'default': default,
            'help': cmd.ARGS[name],
            'dest': name,
        }
        if default is False:
            # Boolean flags are enabled simply by their presence.
            option['action'] = "store_true"
        elif default is None:
            option['action'] = "store"
        name = name.replace('_', '-')
        parser.add_option('--' + name, **option)
    (options, args) = parser.parse_args(args[1:])
    if len(args) < diff:
        parser.error("incorrect number of arguments")
    # Append the parsed option values in declaration order.
    for name in _args:
        args.append(getattr(options, name))
    cmd.main(*args)
def usage():
    """Print the list of supported commands and exit with status 2."""
    print('usage: gitcc COMMAND [ARGS]\n')
    width = 11
    for cmd in commands:
        # The first docstring line of each command module is its summary.
        print(' %s %s' % (get_module_name(cmd).ljust(width),
              cmd.__doc__.split('\n')[0]))
    sys.exit(2)
def get_module_name(module):
    """Return the name of the given module, without the package name.

    For example, if the given module is checkin, the full module name is
    "git_cc.checkin" and the bare name is "checkin".  The given module
    must already have been imported.
    """
    # The bare name is everything after the last dot (or the whole name
    # when the module is not part of a package).
    return module.__name__.split('.')[-1]
# Entry point when this module is executed directly as a script.
if __name__ == '__main__':
    main()
| gpl-2.0 |
weso/CWR-DataApi | tests/grammar/factory/record/test_work_conflict.py | 1 | 6028 | # -*- coding: utf-8 -*-
import unittest
from pyparsing import ParseException
from tests.utils.grammar import get_record_grammar
"""
CWR Work conflict grammar tests.
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestWorkValid(unittest.TestCase):
"""
Tests that the Work grammar decodes correctly formatted strings
"""
    def setUp(self):
        # Build the work-conflict (EXC) record grammar once per test.
        self.grammar = get_record_grammar('work_conflict')
    def test_valid_full(self):
        """
        Tests that the Work grammar decodes correctly formatted Work record.

        This test contains all the optional fields.
        """
        # Fixed-width EXC record exercising every optional field.
        record = 'EXC0000123400000023TITLE OF THE WORK ENABCD0123456789T012345678920130102AB0123456789POP030201YMUSPOTMODMOVORIORITHE CONTACT A123456789ARY01220140302Y28#3 KV 297#1 Y'

        result = self.grammar.parseString(record)[0]

        # Record prefix fields.
        self.assertEqual('EXC', result.record_type)
        self.assertEqual(1234, result.transaction_sequence_n)
        self.assertEqual(23, result.record_sequence_n)
        # Work identification.
        self.assertEqual('TITLE OF THE WORK', result.title)
        self.assertEqual('EN', result.language_code)
        self.assertEqual('ABCD0123456789', result.submitter_work_n)
        self.assertEqual('T0123456789', result.iswc)
        self.assertEqual(1, result.copyright_date.month)
        self.assertEqual(2, result.copyright_date.day)
        self.assertEqual(2013, result.copyright_date.year)
        self.assertEqual('AB0123456789', result.copyright_number)
        self.assertEqual('POP', result.musical_work_distribution_category)
        self.assertEqual(3, result.duration.hour)
        self.assertEqual(2, result.duration.minute)
        self.assertEqual(1, result.duration.second)
        self.assertEqual('Y', result.recorded_indicator)
        self.assertEqual('MUS', result.text_music_relationship)
        self.assertEqual('POT', result.composite_type)
        self.assertEqual('MOD', result.version_type)
        self.assertEqual('MOV', result.excerpt_type)
        self.assertEqual('ORI', result.music_arrangement)
        self.assertEqual('ORI', result.lyric_adaptation)
        self.assertEqual('THE CONTACT', result.contact_name)
        self.assertEqual('A123456789', result.contact_id)
        self.assertEqual('AR', result.work_type)
        self.assertEqual(True, result.grand_rights_indicator)
        self.assertEqual(12, result.composite_component_count)
        self.assertEqual(2, result.date_publication_printed_edition.day)
        self.assertEqual(3, result.date_publication_printed_edition.month)
        self.assertEqual(2014, result.date_publication_printed_edition.year)
        self.assertEqual('Y', result.exceptional_clause)
        self.assertEqual('28#3', result.opus_number)
        self.assertEqual('KV 297#1', result.catalogue_number)
        self.assertEqual('Y', result.priority_flag)
def test_valid_minimum(self):
"""
Tests that the Work grammar decodes correctly formatted Work record.
This test contains no optional fields.
"""
record = 'EXC0000123400000023TITLE OF THE WORK ENABCD0123456789T012345678920130102AB0123456789POP030201YMUS ORIMOV THE CONTACT A123456789 00020140302Y28#3 KV 297#1 Y'
result = self.grammar.parseString(record)[0]
self.assertEqual('EXC', result.record_type)
self.assertEqual(1234, result.transaction_sequence_n)
self.assertEqual(23, result.record_sequence_n)
self.assertEqual('TITLE OF THE WORK', result.title)
self.assertEqual('EN', result.language_code)
self.assertEqual('ABCD0123456789', result.submitter_work_n)
self.assertEqual('T0123456789', result.iswc)
self.assertEqual(1, result.copyright_date.month)
self.assertEqual(2, result.copyright_date.day)
self.assertEqual(2013, result.copyright_date.year)
self.assertEqual('AB0123456789', result.copyright_number)
self.assertEqual('POP', result.musical_work_distribution_category)
self.assertEqual(3, result.duration.hour)
self.assertEqual(2, result.duration.minute)
self.assertEqual(1, result.duration.second)
self.assertEqual('Y', result.recorded_indicator)
self.assertEqual('MUS', result.text_music_relationship)
self.assertEqual(None, result.composite_type)
self.assertEqual('ORI', result.version_type)
self.assertEqual('MOV', result.excerpt_type)
self.assertEqual(None, result.music_arrangement)
self.assertEqual(None, result.lyric_adaptation)
self.assertEqual('THE CONTACT', result.contact_name)
self.assertEqual('A123456789', result.contact_id)
self.assertEqual(None, result.work_type)
self.assertEqual(None, result.grand_rights_indicator)
self.assertEqual(0, result.composite_component_count)
self.assertEqual(2, result.date_publication_printed_edition.day)
self.assertEqual(3, result.date_publication_printed_edition.month)
self.assertEqual(2014, result.date_publication_printed_edition.year)
self.assertEqual('Y', result.exceptional_clause)
self.assertEqual('28#3', result.opus_number)
self.assertEqual('KV 297#1', result.catalogue_number)
self.assertEqual('Y', result.priority_flag)
class TestIPAGrammarException(unittest.TestCase):
    """Verifies that the work conflict grammar rejects malformed records."""

    def setUp(self):
        self.grammar = get_record_grammar('work_conflict')

    def test_empty(self):
        """
        Tests that an exception is thrown when the record string is empty.
        """
        with self.assertRaises(ParseException):
            self.grammar.parseString('')

    def test_invalid(self):
        """
        Tests that an exception is thrown for an arbitrary invalid string.
        """
        with self.assertRaises(ParseException):
            self.grammar.parseString('This is an invalid string')
| mit |
rimbalinux/MSISDNArea | django/contrib/gis/geos/prepared.py | 13 | 1039 | from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.prototypes import prepared as capi
class PreparedGeometry(GEOSBase):
    """
    A geometry that is prepared for performing certain operations.
    At the moment this includes the contains, covers, and intersects
    operations.
    """
    ptr_type = capi.PREPGEOM_PTR

    def __init__(self, geom):
        """
        Prepare `geom` (a GEOSGeometry) for fast repeated predicate tests.

        Raises:
            TypeError: if `geom` is not a GEOSGeometry.
        """
        if not isinstance(geom, GEOSGeometry):
            raise TypeError('PreparedGeometry may only be initialized on GEOSGeometry.')
        # The GEOS prepared structure keeps internal pointers into the
        # source geometry's data.  Hold a reference so the source cannot be
        # garbage-collected (freeing that data) while we are still alive.
        self._base_geom = geom
        self.ptr = capi.geos_prepare(geom.ptr)

    def __del__(self):
        # Free the underlying prepared structure, if it was ever allocated.
        if self._ptr: capi.prepared_destroy(self._ptr)

    def contains(self, other):
        "Return True if this prepared geometry contains `other`."
        return capi.prepared_contains(self.ptr, other.ptr)

    def contains_properly(self, other):
        "Return True if `other` is contained with no boundary contact."
        return capi.prepared_contains_properly(self.ptr, other.ptr)

    def covers(self, other):
        "Return True if this prepared geometry covers `other`."
        return capi.prepared_covers(self.ptr, other.ptr)

    def intersects(self, other):
        "Return True if this prepared geometry intersects `other`."
        return capi.prepared_intersects(self.ptr, other.ptr)
| bsd-3-clause |
G8bao7/camelbell-server | check_oracle.py | 1 | 10171 | #!//bin/env python
#coding:utf-8
import os
import sys
import string
import time
import datetime
import MySQLdb
import cx_Oracle
import logging
import logging.config
logging.config.fileConfig("etc/logger.ini")
logger = logging.getLogger("oracle")
path='./include'
sys.path.insert(0,path)
import functions as func
import camelbell_oracle as oracle
from multiprocessing import Process;
def check_oracle(host,port,dsn,username,password,server_id,tags):
url = "%s:%s/%s" % (host, port, dsn)
logger_msg = "[BBQ]begin check oracle %s " %(url)
logger.info(logger_msg)
retry = 4
conn = None
for i in range(1,retry):
try:
logger_msg="[BBQ] oracle connect %s retry [%s]" %(url, i)
logger.info(logger_msg)
conn=cx_Oracle.connect(username,password,url) #获取connection对象
break
except Exception, e:
logger_msg="[BBQ] oracle connect %s, %s" %(url,str(e).strip('\n'))
logger.warning(logger_msg)
conn = None
continue
func.check_db_status(server_id,host,port,tags,'oracle')
if conn == None:
try:
connect=0
sql="replace into oracle_status(server_id,host,port,tags,connect) values(%s,%s,%s,%s,%s)"
param=(server_id,host,port,tags,connect)
func.mysql_exec(sql,param)
except Exception, e:
logger.error(str(e).strip('\n'))
sys.exit(1)
finally:
sys.exit(1)
try:
#get info by v$instance
connect = 1
instance_name = oracle.get_instance(conn,'instance_name')
instance_role = oracle.get_instance(conn,'instance_role')
database_role = oracle.get_database(conn,'database_role')
open_mode = oracle.get_database(conn,'open_mode')
protection_mode = oracle.get_database(conn,'protection_mode')
if database_role == 'PRIMARY':
database_role_new = 'm'
dg_stats = '-1'
dg_delay = '-1'
else:
database_role_new = 's'
dg_stats = oracle.get_dg_stats(conn)
dg_delay = oracle.get_dg_delay(conn)
instance_status = oracle.get_instance(conn,'status')
startup_time = oracle.get_instance(conn,'startup_time')
#print startup_time
#startup_time = time.strftime('%Y-%m-%d %H:%M:%S',startup_time)
#localtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
#uptime = (localtime - startup_time).seconds
#print uptime
uptime = oracle.get_instance(conn,'startup_time')
version = oracle.get_instance(conn,'version')
instance_status = oracle.get_instance(conn,'status')
database_status = oracle.get_instance(conn,'database_status')
host_name = oracle.get_instance(conn,'host_name')
archiver = oracle.get_instance(conn,'archiver')
#get info by sql count
session_total = oracle.get_sessions(conn)
session_actives = oracle.get_actives(conn)
session_waits = oracle.get_waits(conn)
#get info by v$parameters
parameters = oracle.get_parameters(conn)
processes = parameters['processes']
##get info by v$parameters
sysstat_0 = oracle.get_sysstat(conn)
time.sleep(1)
sysstat_1 = oracle.get_sysstat(conn)
session_logical_reads_persecond = sysstat_1['session logical reads']-sysstat_0['session logical reads']
physical_reads_persecond = sysstat_1['physical reads']-sysstat_0['physical reads']
physical_writes_persecond = sysstat_1['physical writes']-sysstat_0['physical writes']
physical_read_io_requests_persecond = sysstat_1['physical write total IO requests']-sysstat_0['physical write total IO requests']
physical_write_io_requests_persecond = sysstat_1['physical read IO requests']-sysstat_0['physical read IO requests']
db_block_changes_persecond = sysstat_1['db block changes']-sysstat_0['db block changes']
os_cpu_wait_time = sysstat_0['OS CPU Qt wait time']
logons_persecond = sysstat_1['logons cumulative']-sysstat_0['logons cumulative']
logons_current = sysstat_0['logons current']
opened_cursors_persecond = sysstat_1['opened cursors cumulative']-sysstat_0['opened cursors cumulative']
opened_cursors_current = sysstat_0['opened cursors current']
user_commits_persecond = sysstat_1['user commits']-sysstat_0['user commits']
user_rollbacks_persecond = sysstat_1['user rollbacks']-sysstat_0['user rollbacks']
user_calls_persecond = sysstat_1['user calls']-sysstat_0['user calls']
db_block_gets_persecond = sysstat_1['db block gets']-sysstat_0['db block gets']
#print session_logical_reads_persecond
##################### insert data to mysql server#############################
func.mysql_exec("replace into oracle_status_history SELECT *,LEFT(REPLACE(REPLACE(REPLACE(create_time,'-',''),' ',''),':',''),12) from oracle_status where host='%s' and port=%s;" % (host, port),'')
func.mysql_exec("delete from oracle_status where host='%s' and port=%s;" % (host, port),'')
sql = "insert into oracle_status(server_id,host,port,tags,connect,instance_name,instance_role,instance_status,database_role,open_mode,protection_mode,host_name,database_status,startup_time,uptime,version,archiver,session_total,session_actives,session_waits,dg_stats,dg_delay,processes,session_logical_reads_persecond,physical_reads_persecond,physical_writes_persecond,physical_read_io_requests_persecond,physical_write_io_requests_persecond,db_block_changes_persecond,os_cpu_wait_time,logons_persecond,logons_current,opened_cursors_persecond,opened_cursors_current,user_commits_persecond,user_rollbacks_persecond,user_calls_persecond,db_block_gets_persecond) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"
param = (server_id,host,port,tags,connect,instance_name,instance_role,instance_status,database_role,open_mode,protection_mode,host_name,database_status,startup_time,uptime,version,archiver,session_total,session_actives,session_waits,dg_stats,dg_delay,processes,session_logical_reads_persecond,physical_reads_persecond,physical_writes_persecond,physical_read_io_requests_persecond,physical_write_io_requests_persecond,db_block_changes_persecond,os_cpu_wait_time,logons_persecond,logons_current,opened_cursors_persecond,opened_cursors_current,user_commits_persecond,user_rollbacks_persecond,user_calls_persecond,db_block_gets_persecond)
func.mysql_exec(sql,param)
logger.info("Finish INSERT DATA ")
func.update_db_status_init(server_id,database_role_new,version,host,port,tags)
logger.info("Finish update_db_status_init")
#check tablespace
qSql = "select 1 from oracle_tablespace where host='%s' and port=%s and create_time>=curdate() limit 1" % (host,port)
a = func.mysql_query(qSql)
if func.mysql_query(qSql) == 0:
func.mysql_exec("insert ignore into oracle_tablespace_history SELECT *,LEFT(REPLACE(REPLACE(REPLACE(create_time,'-',''),' ',''),':',''),12) from oracle_tablespace where host='%s' and port=%s;" % (host, port),'')
func.mysql_exec("delete from oracle_tablespace where host='%s' and port=%s;" % (host, port),'')
tablespace = oracle.get_tablespace(conn)
if tablespace:
for line in tablespace:
ts_name=line[0]
if igTsNames.count(ts_name) > 0:
continue
sql="insert into oracle_tablespace(server_id,host,port,tags,tablespace_name,total_size,used_size,avail_size,used_rate) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)"
param=(server_id,host,port,tags,line[0],line[1],line[2],line[3],int(line[4].rstrip("%")))
logger.info(param)
func.mysql_exec(sql,param)
else:
logger.info("%s:%s today has stat oracle_tablespace. will not do" % (host,port))
logger.info("Finish oracle_tablespace")
except Exception, e:
logger.error(e)
sys.exit(1)
finally:
conn.close()
def main():
    """Fan out one worker process per monitored Oracle server, then run
    cleanup SQL on the monitoring tables."""
    # Targets to poll; credentials come from the config file below, not
    # from this table (see the superseded query kept for reference).
    #servers=func.mysql_query("select id,host,port,dsn,username,password,tags from db_servers_oracle where is_delete=0 and monitor=1;")
    servers=func.mysql_query("select id,host,port,dsn,tags from db_servers_oracle where is_delete=0 and monitor=1;")
    #++ guoqi
    # Shared credentials for every target, read from the [monitor_oracle]
    # section of the configuration.
    cnfKey = "monitor_oracle"
    username = func.get_config(cnfKey,'user')
    password = func.get_config(cnfKey,'passwd')
    min_interval = func.get_option('min_interval')
    logger.info("check oracle controller start.")
    if servers:
        # One process per target so a slow or hung instance cannot block
        # the rest of the sweep; wait for all of them before cleanup.
        plist = []
        for row in servers:
            (server_id, host, port, dsn, tags) = row
            p = Process(target = check_oracle, args = (host,port,dsn,username,password,server_id,tags))
            plist.append(p)
            p.start()
        #time.sleep(10)
        #for p in plist:
        # p.terminate()
        for p in plist:
            p.join()
    else:
        logger.warning("check oracle: not found any servers")
    # Mark rows not refreshed within min_interval seconds as disconnected,
    # then delete rows belonging to servers that are no longer monitored.
    func.mysql_exec('update oracle_status set connect=0,create_time=now() where create_time<date_sub(now(), interval %s second)' % (min_interval))
    func.mysql_exec('DELETE ot FROM oracle_tablespace AS ot, db_servers_oracle AS d where (d.is_delete=1 or d.monitor=0) AND ot.host=d.host AND ot.port=d.port')
    func.mysql_exec('DELETE ot FROM oracle_status AS ot, db_servers_oracle AS d where (d.is_delete=1 or d.monitor=0) AND ot.host=d.host AND ot.port=d.port')
    #func.mysql_exec('DELETE ds FROM oracle_status AS ds, (SELECT s.id,d.host FROM oracle_status AS s LEFT JOIN db_servers_oracle AS d ON d.is_delete=0 AND d.monitor=1 AND s.host=d.host AND s.port=d.port HAVING d.`host` IS NULL) AS t WHERE ds.id=t.id')
    func.mysql_exec('DELETE ds FROM db_status AS ds, (SELECT s.id,d.host FROM db_status AS s LEFT JOIN db_servers_oracle AS d ON d.is_delete=0 AND d.monitor=1 AND s.host=d.host AND s.port=d.port WHERE db_type="oracle" HAVING d.`host` IS NULL) AS t WHERE ds.id=t.id')
    logger.info("check oracle controller finished.")
if __name__=='__main__':
    # Tablespaces excluded from the per-tablespace usage report.
    # NOTE(review): igTsNames is read as a module-level global inside
    # check_oracle(); it only exists when this file runs as a script, so
    # importing the module and calling check_oracle() directly would raise
    # NameError — confirm before refactoring.
    igTsNames = ["SYSAUX", "SYSTEM"]
    main()
| gpl-3.0 |
Split-Screen/android_kernel_motorola_msm8916 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that transparently creates nested dicts on access.

    Allows writing e.g. ``d[a][b][c] = 1`` without pre-creating the
    intermediate levels.
    """
    return defaultdict(autodict)
# Per-event registries mapping field names to their flag / symbolic value
# definitions; populated by the define_* callbacks below and consumed by
# flag_str() / symbol_str().
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    """Register the delimiter used between a flag field's set-bit names."""
    flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    """Register the display name for one bit value of a flag field."""
    flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
    """Placeholder for symmetry with define_flag_field; the registry entry
    is created lazily by define_symbolic_value()."""
    # nothing to do, really
    pass
def define_symbolic_value(event_name, field_name, value, field_str):
    """Register the display name for one value of a symbolic field."""
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Render a flag field's bitmask as its registered bit names, joined
    by the field's delimiter.  Unknown bits are silently dropped; a zero
    value renders as the name registered for bit 0, if any."""
    parts = []
    field = flag_fields[event_name][field_name]
    if field:
        for bit in sorted(field['values'].keys()):
            if not value and not bit:
                parts.append(field['values'][bit])
                break
            if bit and (value & bit) == bit:
                if parts and field['delim']:
                    parts.append(" " + field['delim'] + " ")
                parts.append(field['values'][bit])
                value &= ~bit
    return "".join(parts)
def symbol_str(event_name, field_name, value):
    """Translate a symbolic field value to its registered display name,
    or '' when the value (or the field itself) is unknown."""
    field = symbolic_fields[event_name][field_name]
    if not field:
        return ""
    for key in sorted(field['values'].keys()):
        if not value and not key:
            return field['values'][key]
        if value == key:
            return field['values'][key]
    return ""
# Names for the individual bits of the common trace-flags word, in
# ascending bit order (iteration order matters for the rendered string).
trace_flags = {
    0x00: "NONE",
    0x01: "IRQS_OFF",
    0x02: "IRQS_NOSUPPORT",
    0x04: "NEED_RESCHED",
    0x08: "HARDIRQ",
    0x10: "SOFTIRQ",
}

def trace_flag_str(value):
    """Render the trace-flags bitmask as ' | '-separated names.

    A zero mask renders as "NONE"; bits without a registered name are
    ignored.
    """
    names = []
    remaining = value
    for mask in trace_flags.keys():
        if not remaining and not mask:
            names.append("NONE")
            break
        if mask and (remaining & mask) == mask:
            names.append(trace_flags[mask])
            remaining &= ~mask
    return " | ".join(names)
def taskState(state):
    """Map a scheduler task-state code to its one-letter name, or
    "Unknown" for any unrecognised code."""
    names = {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }
    return names.get(state, "Unknown")
class EventHeaders:
    """Bundle of the common fields shared by every perf trace event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Full event timestamp, in nanoseconds."""
        return self.secs * (10 ** 9) + self.nsecs

    def ts_format(self):
        """Event timestamp formatted as 'seconds.microseconds'."""
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
danbeam/catapult | third_party/oauth2client/oauth2client/util.py | 60 | 5704 | #!/usr/bin/env python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Common utility library."""
# Module metadata; __all__ restricts what `from ... import *` exports.
__author__ = [
    'rafek@google.com (Rafe Kaplan)',
    'guido@google.com (Guido van Rossum)',
]
__all__ = [
    'positional',
    'POSITIONAL_WARNING',
    'POSITIONAL_EXCEPTION',
    'POSITIONAL_IGNORE',
]

import functools
import inspect
import logging
import sys
import types

import six
from six.moves import urllib

logger = logging.getLogger(__name__)

# Enforcement modes for the @positional decorator: a violation may raise
# TypeError, log a warning, or be silently ignored.
POSITIONAL_WARNING = 'WARNING'
POSITIONAL_EXCEPTION = 'EXCEPTION'
POSITIONAL_IGNORE = 'IGNORE'
POSITIONAL_SET = frozenset([POSITIONAL_WARNING, POSITIONAL_EXCEPTION,
                            POSITIONAL_IGNORE])

# Module-level switch consumed by positional(); warn-only by default.
positional_parameters_enforcement = POSITIONAL_WARNING
def positional(max_positional_args):
    """A decorator to declare that only the first N arguments may be positional.

    This decorator makes it easy to support Python 3 style keyword-only
    parameters. For example, in Python 3 it is possible to write::

        def fn(pos1, *, kwonly1=None, kwonly1=None):
            ...

    All named parameters after ``*`` must be a keyword::

        fn(10, 'kw1', 'kw2')  # Raises exception.
        fn(10, kwonly1='kw1')  # Ok.

    Example
    ^^^^^^^

    To define a function like above, do::

        @positional(1)
        def fn(pos1, kwonly1=None, kwonly2=None):
            ...

    If no default value is provided to a keyword argument, it becomes a required
    keyword argument::

        @positional(0)
        def fn(required_kw):
            ...

    This must be called with the keyword parameter::

        fn()  # Raises exception.
        fn(10)  # Raises exception.
        fn(required_kw=10)  # Ok.

    When defining instance or class methods always remember to account for
    ``self`` and ``cls``::

        class MyClass(object):

            @positional(2)
            def my_method(self, pos1, kwonly1=None):
                ...

            @classmethod
            @positional(2)
            def my_method(cls, pos1, kwonly1=None):
                ...

    The positional decorator behavior is controlled by
    ``util.positional_parameters_enforcement``, which may be set to
    ``POSITIONAL_EXCEPTION``, ``POSITIONAL_WARNING`` or
    ``POSITIONAL_IGNORE`` to raise an exception, log a warning, or do
    nothing, respectively, if a declaration is violated.

    Args:
      max_positional_arguments: Maximum number of positional arguments. All
        parameters after the this index must be keyword only.

    Returns:
      A decorator that prevents using arguments after max_positional_args from
      being used as positional parameters.

    Raises:
      TypeError if a key-word only argument is provided as a positional
      parameter, but only if util.positional_parameters_enforcement is set to
      POSITIONAL_EXCEPTION.
    """
    def positional_decorator(wrapped):
        @functools.wraps(wrapped)
        def positional_wrapper(*args, **kwargs):
            if len(args) > max_positional_args:
                plural_s = ''
                if max_positional_args != 1:
                    plural_s = 's'
                message = '%s() takes at most %d positional argument%s (%d given)' % (
                    wrapped.__name__, max_positional_args, plural_s, len(args))
                if positional_parameters_enforcement == POSITIONAL_EXCEPTION:
                    raise TypeError(message)
                elif positional_parameters_enforcement == POSITIONAL_WARNING:
                    logger.warning(message)
                else:  # IGNORE
                    pass
            return wrapped(*args, **kwargs)
        return positional_wrapper

    if isinstance(max_positional_args, six.integer_types):
        return positional_decorator
    else:
        # Used directly as @positional (no count): allow every parameter that
        # has no default value to be positional.
        # NOTE(review): inspect.getargspec was removed in Python 3.11; kept
        # here for the py2/py3 (six) compatibility this module targets.
        args, _, _, defaults = inspect.getargspec(max_positional_args)
        # BUGFIX: getargspec returns defaults=None (not an empty tuple) when
        # the function has no keyword defaults, so len(defaults) used to
        # raise TypeError for bare @positional on such functions.
        defaults = defaults or ()
        return positional(len(args) - len(defaults))(max_positional_args)
def scopes_to_string(scopes):
    """Converts scope value to a string.

    If scopes is a string then it is simply passed through. If scopes is an
    iterable then a string is returned that is all the individual scopes
    concatenated with spaces.

    Args:
        scopes: string or iterable of strings, the scopes.

    Returns:
        The scopes formatted as a single string.
    """
    is_single_scope = isinstance(scopes, six.string_types)
    return scopes if is_single_scope else ' '.join(scopes)
def dict_to_tuple_key(dictionary):
    """Converts a dictionary to a tuple that can be used as an immutable key.

    Sorting guarantees that logically equal dictionaries always map to the
    same tuple, regardless of insertion order.

    Args:
        dictionary: the dictionary to use as the key.

    Returns:
        A tuple of (key, value) pairs in sorted key order.
    """
    sorted_items = sorted(dictionary.items())
    return tuple(sorted_items)
def _add_query_parameter(url, name, value):
    """Adds a query parameter to a url.

    Replaces the current value if it already exists in the URL.

    Args:
        url: string, url to add the query parameter to.
        name: string, query parameter name.
        value: string, query parameter value.

    Returns:
        The updated url; returned unchanged when value is None.
    """
    if value is None:
        return url
    parts = list(urllib.parse.urlparse(url))
    query = dict(urllib.parse.parse_qsl(parts[4]))
    query[name] = value
    parts[4] = urllib.parse.urlencode(query)
    return urllib.parse.urlunparse(parts)
| bsd-3-clause |
beni55/viewfinder | backend/services/itunes_store.py | 13 | 7553 | # Copyright 2012 Viewfinder Inc. All Rights Reserved
"""iTunes Store service client.
Server-side code for working with iTunes/App Store in-app purchases.
Contents:
* ITunesStoreClient: Communicates with the store to verify receipts and
process renewals.
"""
import base64
import json
import logging
import time
from tornado.httpclient import AsyncHTTPClient
from viewfinder.backend.base import secrets
class ITunesStoreError(Exception):
    """Raised when a receipt's validity cannot be determined and the
    verification should be retried later."""
    pass

# Bundle id that every valid Viewfinder receipt must carry.
kViewfinderBundleId = 'co.viewfinder.Viewfinder'
class VerifyResponse(object):
    """Parsed result of an iTunes verifyReceipt call.

    Wraps the JSON document returned by the store together with the receipt
    data that was originally submitted for verification.
    """

    # Error codes from https://developer.apple.com/library/ios/#documentation/NetworkingInternet/Conceptual/StoreKitGuide/RenewableSubscriptions/RenewableSubscriptions.html#//apple_ref/doc/uid/TP40008267-CH4-SW2
    # These apply to auto-renewing subscriptions only; non-renewing
    # purchases use a different error table.
    JSON_ERROR = 21000
    MALFORMED_RECEIPT_ERROR = 21002
    SIGNATURE_ERROR = 21003
    PASSWORD_ERROR = 21004
    SERVER_UNAVAILABLE_ERROR = 21005
    EXPIRED_ERROR = 21006
    SANDBOX_ON_PROD_ERROR = 21007
    PROD_ON_SANDBOX_ERROR = 21008

    # A "final" error means the data is definitely invalid; a non-final
    # error means validity could not be determined (retry later).
    # EXPIRED_ERROR appears on neither list: such a receipt was once valid
    # and expiration is checked separately.  SANDBOX_ON_PROD_ERROR is final
    # because it may indicate tampering (and sandbox receipts are free), but
    # PROD_ON_SANDBOX_ERROR is non-final so prod receipts processed during a
    # misconfiguration get retried.
    FINAL_ERRORS = {JSON_ERROR, MALFORMED_RECEIPT_ERROR, SIGNATURE_ERROR,
                    SANDBOX_ON_PROD_ERROR}
    NON_FINAL_ERRORS = {PASSWORD_ERROR, SERVER_UNAVAILABLE_ERROR,
                        PROD_ON_SANDBOX_ERROR}

    def __init__(self, orig_receipt, response_body):
        self.orig_receipt = orig_receipt
        self.response = json.loads(response_body)

    def GetStatus(self):
        """Return the verification status code: 0 on success, otherwise one
        of the error codes defined on this class."""
        return self.response['status']

    def IsValid(self):
        """Return True if the receipt is properly formatted and signed.

        Returns False when the receipt is definitely invalid; raises
        ITunesStoreError when validity could not be determined and the
        check must be retried.  Expired receipts still count as "valid",
        so expiration must be checked separately.
        """
        status = self.GetStatus()
        if status in (0, VerifyResponse.EXPIRED_ERROR):
            if self.GetBundleId() != kViewfinderBundleId:
                logging.warning('got signed receipt for another app: %s', self.GetBundleId())
                return False
            return True
        if status in VerifyResponse.FINAL_ERRORS:
            return False
        raise ITunesStoreError('Error verfiying receipt: %r' % status)

    def GetLatestReceiptInfo(self):
        """Return the most recent decoded receipt info dict, which may
        differ from the submitted receipt if a renewal occurred."""
        for key in ('latest_receipt_info', 'latest_expired_receipt_info'):
            if key in self.response:
                return self.response[key]
        return self.response['receipt']

    def GetBundleId(self):
        """Return the subscription's bundle id (ours is
        "co.viewfinder.Viewfinder")."""
        return self.GetLatestReceiptInfo()['bid']

    def GetProductId(self):
        """Return the product id, which encodes both the subscription type
        and the billing cycle as configured in iTunes Connect."""
        return self.GetLatestReceiptInfo()['product_id']

    def GetTransactionTime(self):
        """Return the purchase time as a Python timestamp."""
        millis = int(self.GetLatestReceiptInfo()['purchase_date_ms'])
        return millis / 1000.0

    def GetExpirationTime(self):
        """Return the subscription expiration as a Python timestamp
        (floating-point seconds since 1970)."""
        millis = int(self.GetLatestReceiptInfo()['expires_date'])
        return millis / 1000.0

    def IsExpired(self):
        """Return True if the subscription has already expired."""
        return self.GetExpirationTime() < time.time()

    def IsRenewable(self):
        """Return True if a renewal should be scheduled after expiration."""
        return self.GetStatus() == 0

    def GetRenewalData(self):
        """Return the receipt blob to submit when this subscription is due
        for renewal.  Meaningful only when IsRenewable() is True."""
        if 'latest_receipt' not in self.response:
            return self.orig_receipt
        return base64.b64decode(self.response['latest_receipt'])

    def GetOriginalTransactionId(self):
        """Return the original transaction id, constant across all renewals
        of a single subscription."""
        return self.GetLatestReceiptInfo()['original_transaction_id']

    def GetRenewalTransactionId(self):
        """Return the id of the most recent renewal transaction (equal to
        GetOriginalTransactionId() before any renewal)."""
        return self.GetLatestReceiptInfo()['transaction_id']

    def ToString(self):
        """Serialize this response (receipt + raw JSON) for storage."""
        payload = {'orig_receipt': self.orig_receipt,
                   'response': json.dumps(self.response)}
        return json.dumps(payload)

    @classmethod
    def FromString(cls, s):
        """Reconstruct a VerifyResponse previously produced by ToString()."""
        parsed = json.loads(s)
        return cls(parsed['orig_receipt'], parsed['response'])
class ITunesStoreClient(object):
    """Thin async client for Apple's receipt-verification endpoints, plus a
    process-wide registry of named instances ('dev' = sandbox, 'prod')."""

    _SETTINGS = {
        'dev': {
            'verify_url': 'https://sandbox.itunes.apple.com/verifyReceipt',
        },
        'prod': {
            'verify_url': 'https://buy.itunes.apple.com/verifyReceipt',
        },
    }

    # Named instances managed by SetInstance / ClearInstance.
    _instance_map = dict()

    def __init__(self, environment='dev', http_client=None):
        self._settings = ITunesStoreClient._SETTINGS[environment]
        self.http_client = AsyncHTTPClient() if http_client is None else http_client

    def VerifyReceipt(self, receipt_data, callback):
        """Verifies a receipt. Callback receives a VerifyResponse."""
        def _OnFetch(response):
            response.rethrow()
            callback(VerifyResponse(receipt_data, response.body))

        request = {
            'receipt-data': base64.b64encode(receipt_data),
            'password': secrets.GetSecret('itunes_subscription_secret'),
        }
        self.http_client.fetch(self._settings['verify_url'], method='POST',
                               body=json.dumps(request), callback=_OnFetch)

    @staticmethod
    def Instance(environment):
        """Return the registered client for `environment`; asserts if none."""
        assert environment in ITunesStoreClient._instance_map, '%s iTunes instance not available' % environment
        return ITunesStoreClient._instance_map[environment]

    @staticmethod
    def SetInstance(environment, itunes_client):
        """Sets a new instance for testing."""
        ITunesStoreClient._instance_map[environment] = itunes_client

    @staticmethod
    def ClearInstance(environment):
        """Removes a previously-set instance."""
        del ITunesStoreClient._instance_map[environment]
| apache-2.0 |
CubicERP/odoo | addons/account/wizard/account_move_line_unreconcile_select.py | 385 | 1864 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_move_line_unreconcile_select(osv.osv_memory):
    """Wizard asking for an account, then listing its reconciled entries."""
    _name = "account.move.line.unreconcile.select"
    _description = "Unreconciliation"
    _columns = {
        'account_id': fields.many2one('account.account', 'Account', required=True),
    }

    def action_open_window(self, cr, uid, ids, context=None):
        """Open the journal-items list filtered to the selected account's
        reconciled, posted move lines."""
        record = self.read(cr, uid, ids, context=context)[0]
        domain = "[('account_id','=',%d),('reconcile_id','<>',False),('state','<>','draft')]" % record['account_id']
        return {
            'domain': domain,
            'name': 'Unreconciliation',
            'view_type': 'form',
            'view_mode': 'tree,form',
            'view_id': False,
            'res_model': 'account.move.line',
            'type': 'ir.actions.act_window'
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
qgis/QGIS | python/plugins/otbprovider/OtbProviderPlugin.py | 9 | 1557 | # -*- coding: utf-8 -*-
"""
***************************************************************************
OtbProviderPlugin.py
---------------------
Date : June 2021
Copyright : (C) 2021 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'June 2021'
__copyright__ = '(C) 2021, Alexander Bruy'
from qgis.core import QgsApplication, QgsRuntimeProfiler
with QgsRuntimeProfiler.profile('Import OTB Provider'):
from otbprovider.OtbAlgorithmProvider import OtbAlgorithmProvider
class OtbProviderPlugin:
    """Registers the OTB algorithm provider with the QGIS Processing
    framework, and removes it again on unload."""

    def __init__(self):
        self.provider = OtbAlgorithmProvider()

    def initProcessing(self):
        registry = QgsApplication.processingRegistry()
        registry.addProvider(self.provider)

    def initGui(self):
        self.initProcessing()

    def unload(self):
        registry = QgsApplication.processingRegistry()
        registry.removeProvider(self.provider)
| gpl-2.0 |
rahul67/hue | desktop/core/ext-py/boto-2.38.0/boto/roboto/awsqueryrequest.py | 153 | 18579 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
import os
import boto
import optparse
import copy
import boto.exception
import boto.roboto.awsqueryservice
import bdb
import traceback
try:
import epdb as debugger
except ImportError:
import pdb as debugger
def boto_except_hook(debugger_flag, debug_flag):
    """Build a replacement for ``sys.excepthook`` for boto CLI commands.

    :param debugger_flag: drop into the post-mortem debugger on error when
        running interactively (stdin and stdout are TTYs).
    :param debug_flag: print the traceback instead of just the error value.
    :return: a callable suitable for assignment to ``sys.excepthook``.
    """
    def excepthook(typ, value, tb):
        if typ is bdb.BdbQuit:
            # User quit the debugger; exit quietly.
            sys.exit(1)
        # Restore the default hook so a failure inside this handler
        # cannot recurse into it.
        sys.excepthook = sys.__excepthook__
        if debugger_flag and sys.stdout.isatty() and sys.stdin.isatty():
            if debugger.__name__ == 'epdb':
                debugger.post_mortem(tb, typ, value)
            else:
                debugger.post_mortem(tb)
        elif debug_flag:
            # Bug fix: traceback.print_tb() writes to stderr and returns
            # None, so wrapping it in print() used to emit a spurious
            # "None" line after the traceback.
            traceback.print_tb(tb)
            sys.exit(1)
        else:
            print(value)
            sys.exit(1)
    return excepthook
class Line(object):
    """Accumulates one tab-separated row of CLI output.

    The row begins with *label*; each appended field gets a trailing tab.
    """

    def __init__(self, fmt, data, label):
        self.fmt = fmt
        self.data = data
        self.label = label
        # The row text; every field (including the label) is tab-terminated.
        self.line = '%s\t' % label
        self.printed = False

    def append(self, datum):
        """Add one field (plus its trailing tab) to the row."""
        self.line = self.line + ('%s\t' % datum)

    def print_it(self):
        """Print the row at most once, however often this is called."""
        if self.printed:
            return
        print(self.line)
        self.printed = True
class RequiredParamError(boto.exception.BotoClientError):
    """Raised when one or more required request parameters are missing."""

    def __init__(self, required):
        # Keep the raw list of missing parameter names for callers.
        self.required = required
        message = 'Required parameters are missing: %s' % self.required
        super(RequiredParamError, self).__init__(message)
class EncoderError(boto.exception.BotoClientError):
    """Raised when a parameter value cannot be encoded for the wire."""

    def __init__(self, error_msg):
        message = 'Error encoding value (%s)' % error_msg
        super(EncoderError, self).__init__(message)
class FilterError(boto.exception.BotoClientError):
    """Raised when the caller supplies filter names the request lacks."""

    def __init__(self, filters):
        # Keep the offending filter names for programmatic inspection.
        self.filters = filters
        message = 'Unknown filters: %s' % self.filters
        super(FilterError, self).__init__(message)
class Encoder(object):
    """Translates typed parameter values into flat AWS query-string params.

    Each ``encode_<ptype>`` classmethod writes one or more entries into the
    request-parameter dict *rp*, keyed by the parameter's wire label.
    """

    @classmethod
    def encode(cls, p, rp, v, label=None):
        """Encode value *v* of parameter *p* into request params *rp*.

        :param p: parameter descriptor; must expose ``name`` and ``ptype``.
        :param rp: dict of request parameters to populate.
        :param v: the raw value supplied by the caller.
        :param label: optional explicit wire name overriding ``p.name``.
        :raises EncoderError: if ``p.ptype`` has no matching encoder.
        """
        if p.name.startswith('_'):
            # Underscore-prefixed parameters are internal and never sent.
            return
        # Bug fix: look the encoder up *before* invoking it.  The original
        # wrapped the call in try/except AttributeError, so an
        # AttributeError raised *inside* a valid encoder was misreported
        # as an unknown parameter type.
        mthd = getattr(cls, 'encode_' + p.ptype, None)
        if mthd is None:
            raise EncoderError('Unknown type: %s' % p.ptype)
        mthd(p, rp, v, label)

    @classmethod
    def encode_string(cls, p, rp, v, l):
        label = l if l else p.name
        rp[label] = v

    # Files and enums travel on the wire exactly like strings.
    encode_file = encode_string
    encode_enum = encode_string

    @classmethod
    def encode_integer(cls, p, rp, v, l):
        label = l if l else p.name
        rp[label] = '%d' % v

    @classmethod
    def encode_boolean(cls, p, rp, v, l):
        label = l if l else p.name
        rp[label] = 'true' if v else 'false'

    @classmethod
    def encode_datetime(cls, p, rp, v, l):
        # Datetimes are passed through verbatim; the caller supplies the
        # already-formatted timestamp string.
        label = l if l else p.name
        rp[label] = v

    @classmethod
    def encode_array(cls, p, rp, v, l):
        # AWS list parameters are serialized as Name.1, Name.2, ... (1-based).
        v = boto.utils.mklist(v)
        label = l if l else p.name
        label = label + '.%d'
        for i, value in enumerate(v):
            rp[label % (i + 1)] = value
class AWSQueryRequest(object):
    """Base class describing a single AWS Query-API action.

    Subclasses declare the action's interface (``Params``, ``Args``,
    ``Filters``, ``Response``); this class turns those declarations into
    wire-level request parameters, an optparse-based command-line tool and
    a generic formatter for the parsed XML response.
    """

    ServiceClass = None   # connection class used to reach the service
    Description = ''      # one-line description shown in CLI help
    Params = []           # named (optional or required) parameters
    Args = []             # positional command-line arguments
    Filters = []          # server-side result filters
    Response = {}         # response schema driving parsing and printing

    # Maps declared ptypes to optparse option types.  A value of None
    # (boolean) means the option becomes a store_true flag instead.
    CLITypeMap = {'string' : 'string',
                  'integer' : 'int',
                  'int' : 'int',
                  'enum' : 'choice',
                  'datetime' : 'string',
                  'dateTime' : 'string',
                  'file' : 'string',
                  'boolean' : None}

    @classmethod
    def name(cls):
        """Return the AWS action name; by convention the class name itself."""
        return cls.__name__

    def __init__(self, **args):
        self.args = args
        self.parser = None
        self.cli_options = None
        self.cli_args = None
        self.cli_output_format = None
        self.connection = None
        self.list_markers = []
        self.item_markers = []
        self.request_params = {}
        self.connection_args = None
        # Bug fix: initialize here so the status/reason/request_id
        # properties are safe to read before send() is called; previously
        # these attributes only existed after send(), so the properties
        # raised AttributeError despite testing "is not None".
        self.http_response = None
        self.aws_response = None

    def __repr__(self):
        return self.name()

    def get_connection(self, **args):
        """Lazily create (and cache) the service connection."""
        if self.connection is None:
            self.connection = self.ServiceClass(**args)
        return self.connection

    @property
    def status(self):
        """HTTP status code of the last response, or None before send()."""
        retval = None
        if self.http_response is not None:
            retval = self.http_response.status
        return retval

    @property
    def reason(self):
        """HTTP reason phrase of the last response, or None before send()."""
        retval = None
        if self.http_response is not None:
            retval = self.http_response.reason
        return retval

    @property
    def request_id(self):
        """AWS request id from the last parsed response, or None."""
        retval = None
        if self.aws_response is not None:
            retval = getattr(self.aws_response, 'requestId')
        return retval

    def process_filters(self):
        """Validate user filters and encode them as Filter.N.* params.

        :raises FilterError: if a supplied filter name is not declared in
            ``self.Filters``.
        """
        filters = self.args.get('filters', [])
        filter_names = [f['name'] for f in self.Filters]
        unknown_filters = [f for f in filters if f not in filter_names]
        if unknown_filters:
            # Bug fix: pass the raw list.  FilterError composes the
            # 'Unknown filters: ...' message itself, so pre-formatting it
            # here produced a doubled prefix in the final message.
            raise FilterError(unknown_filters)
        for i, filter in enumerate(self.Filters):
            name = filter['name']
            if name in filters:
                self.request_params['Filter.%d.Name' % (i+1)] = name
                for j, value in enumerate(boto.utils.mklist(filters[name])):
                    Encoder.encode(filter, self.request_params, value,
                                   'Filter.%d.Value.%d' % (i+1, j+1))

    def process_args(self, **args):
        """
        Responsible for walking through Params defined for the request and:

        * Matching them with keyword parameters passed to the request
          constructor or via the command line.
        * Checking to see if all required parameters have been specified
          and raising an exception, if not.
        * Encoding each value into the set of request parameters that will
          be sent in the request to the AWS service.

        :raises RequiredParamError: if any required parameter is missing.
        """
        self.args.update(args)
        # Anything not consumed as a request parameter is forwarded to the
        # connection constructor (credentials, region, url, ...).
        self.connection_args = copy.copy(self.args)
        if 'debug' in self.args and self.args['debug'] >= 2:
            boto.set_stream_logger(self.name())
        required = [p.name for p in self.Params+self.Args if not p.optional]
        for param in self.Params+self.Args:
            # CLI long names use dashes; keyword args use underscores.
            if param.long_name:
                python_name = param.long_name.replace('-', '_')
            else:
                python_name = boto.utils.pythonize_name(param.name, '_')
            value = None
            if python_name in self.args:
                value = self.args[python_name]
            if value is None:
                value = param.default
            if value is not None:
                if param.name in required:
                    required.remove(param.name)
                if param.request_param:
                    if param.encoder:
                        param.encoder(param, self.request_params, value)
                    else:
                        Encoder.encode(param, self.request_params, value)
            if python_name in self.args:
                del self.connection_args[python_name]
        if required:
            # Report the missing parameters by their CLI option names.
            l = []
            for p in self.Params+self.Args:
                if p.name in required:
                    if p.short_name and p.long_name:
                        l.append('(%s, %s)' % (p.optparse_short_name,
                                               p.optparse_long_name))
                    elif p.short_name:
                        l.append('(%s)' % p.optparse_short_name)
                    else:
                        l.append('(%s)' % p.optparse_long_name)
            raise RequiredParamError(','.join(l))
        boto.log.debug('request_params: %s' % self.request_params)
        self.process_markers(self.Response)

    def process_markers(self, fmt, prev_name=None):
        """Walk the Response schema collecting list/item element names.

        The markers tell the XML response parser which elements represent
        lists and which represent the items inside them.
        """
        if fmt and fmt['type'] == 'object':
            for prop in fmt['properties']:
                self.process_markers(prop, fmt['name'])
        elif fmt and fmt['type'] == 'array':
            self.list_markers.append(prev_name)
            self.item_markers.append(fmt['name'])

    def send(self, verb='GET', **args):
        """Encode all parameters, issue the request and parse the response.

        :param verb: HTTP method to use.
        :return: the parsed response Element on HTTP 200.
        :raises: the connection's ResponseError on any non-200 status.
        """
        self.process_args(**args)
        self.process_filters()
        conn = self.get_connection(**self.connection_args)
        self.http_response = conn.make_request(self.name(),
                                               self.request_params,
                                               verb=verb)
        self.body = self.http_response.read()
        boto.log.debug(self.body)
        if self.http_response.status == 200:
            self.aws_response = boto.jsonresponse.Element(list_marker=self.list_markers,
                                                          item_marker=self.item_markers)
            h = boto.jsonresponse.XmlHandler(self.aws_response, self)
            h.parse(self.body)
            return self.aws_response
        else:
            boto.log.error('%s %s' % (self.http_response.status,
                                      self.http_response.reason))
            boto.log.error('%s' % self.body)
            raise conn.ResponseError(self.http_response.status,
                                     self.http_response.reason,
                                     self.body)

    def add_standard_options(self):
        """Add the option group shared by every boto CLI command."""
        group = optparse.OptionGroup(self.parser, 'Standard Options')
        # add standard options that all commands get
        group.add_option('-D', '--debug', action='store_true',
                         help='Turn on all debugging output')
        group.add_option('--debugger', action='store_true',
                         default=False,
                         help='Enable interactive debugger on error')
        group.add_option('-U', '--url', action='store',
                         help='Override service URL with value provided')
        group.add_option('--region', action='store',
                         help='Name of the region to connect to')
        group.add_option('-I', '--access-key-id', action='store',
                         help='Override access key value')
        group.add_option('-S', '--secret-key', action='store',
                         help='Override secret key value')
        group.add_option('--version', action='store_true',
                         help='Display version string')
        if self.Filters:
            # Bug fix: these were added via self.group, an attribute that
            # never exists, so any request declaring Filters raised
            # AttributeError while building its CLI parser.
            group.add_option('--help-filters', action='store_true',
                             help='Display list of available filters')
            group.add_option('--filter', action='append',
                             metavar=' name=value',
                             help='A filter for limiting the results')
        self.parser.add_option_group(group)

    def process_standard_options(self, options, args, d):
        """Apply the standard options to self.args and install the excepthook."""
        if hasattr(options, 'help_filters') and options.help_filters:
            print('Available filters:')
            for filter in self.Filters:
                # NOTE(review): elsewhere Filters entries are indexed as
                # dicts (filter['name']); attribute access here may be a
                # latent inconsistency -- confirm the Filters element type.
                print('%s\t%s' % (filter.name, filter.doc))
            sys.exit(0)
        if options.debug:
            self.args['debug'] = 2
        if options.url:
            self.args['url'] = options.url
        if options.region:
            self.args['region'] = options.region
        if options.access_key_id:
            self.args['aws_access_key_id'] = options.access_key_id
        if options.secret_key:
            self.args['aws_secret_access_key'] = options.secret_key
        if options.version:
            # TODO - Where should the version # come from?
            print('version x.xx')
            # Use sys.exit for consistency; the bare exit() builtin is
            # provided by the site module and may be absent.
            sys.exit(0)
        sys.excepthook = boto_except_hook(options.debugger,
                                          options.debug)

    def get_usage(self):
        """Build the optparse usage string from the declared Args."""
        s = 'usage: %prog [options] '
        l = [ a.long_name for a in self.Args ]
        s += ' '.join(l)
        for a in self.Args:
            if a.doc:
                s += '\n\n\t%s - %s' % (a.long_name, a.doc)
        return s

    def build_cli_parser(self):
        """Create the optparse parser from Params plus the standard options."""
        self.parser = optparse.OptionParser(description=self.Description,
                                            usage=self.get_usage())
        self.add_standard_options()
        for param in self.Params:
            ptype = action = choices = None
            if param.ptype in self.CLITypeMap:
                ptype = self.CLITypeMap[param.ptype]
                action = 'store'
            if param.ptype == 'boolean':
                # Booleans become flags rather than value-taking options.
                action = 'store_true'
            elif param.ptype == 'array':
                if len(param.items) == 1:
                    ptype = param.items[0]['type']
                    action = 'append'
            elif param.cardinality != 1:
                action = 'append'
            if ptype or action == 'store_true':
                if param.short_name:
                    self.parser.add_option(param.optparse_short_name,
                                           param.optparse_long_name,
                                           action=action, type=ptype,
                                           choices=param.choices,
                                           help=param.doc)
                elif param.long_name:
                    self.parser.add_option(param.optparse_long_name,
                                           action=action, type=ptype,
                                           choices=param.choices,
                                           help=param.doc)

    def do_cli(self):
        """Parse the command line, run main() and print/handle the result."""
        if not self.parser:
            self.build_cli_parser()
        self.cli_options, self.cli_args = self.parser.parse_args()
        d = {}
        self.process_standard_options(self.cli_options, self.cli_args, d)
        for param in self.Params:
            if param.long_name:
                p_name = param.long_name.replace('-', '_')
            else:
                p_name = boto.utils.pythonize_name(param.name)
            value = getattr(self.cli_options, p_name)
            if param.ptype == 'file' and value:
                if value == '-':
                    # '-' means read the file contents from stdin.
                    value = sys.stdin.read()
                else:
                    path = os.path.expanduser(value)
                    path = os.path.expandvars(path)
                    if os.path.isfile(path):
                        fp = open(path)
                        value = fp.read()
                        fp.close()
                    else:
                        self.parser.error('Unable to read file: %s' % path)
            d[p_name] = value
        for arg in self.Args:
            if arg.long_name:
                p_name = arg.long_name.replace('-', '_')
            else:
                p_name = boto.utils.pythonize_name(arg.name)
            value = None
            if arg.cardinality == 1:
                if len(self.cli_args) >= 1:
                    value = self.cli_args[0]
            else:
                value = self.cli_args
            d[p_name] = value
        self.args.update(d)
        if hasattr(self.cli_options, 'filter') and self.cli_options.filter:
            d = {}
            for filter in self.cli_options.filter:
                # Bug fix: split on the first '=' only, so filter values
                # containing '=' no longer raise "too many values to unpack".
                name, value = filter.split('=', 1)
                d[name] = value
            if 'filters' in self.args:
                self.args['filters'].update(d)
            else:
                self.args['filters'] = d
        try:
            response = self.main()
            self.cli_formatter(response)
        except RequiredParamError as e:
            print(e)
            sys.exit(1)
        except self.ServiceClass.ResponseError as err:
            print('Error(%s): %s' % (err.error_code, err.error_message))
            sys.exit(1)
        except boto.roboto.awsqueryservice.NoCredentialsError as err:
            print('Unable to find credentials.')
            sys.exit(1)
        except Exception as e:
            print(e)
            sys.exit(1)

    def _generic_cli_formatter(self, fmt, data, label=''):
        """Best-effort, schema-driven printing of the parsed response."""
        if fmt['type'] == 'object':
            for prop in fmt['properties']:
                if 'name' in fmt:
                    if fmt['name'] in data:
                        data = data[fmt['name']]
                    if fmt['name'] in self.list_markers:
                        # Derive a singular, upper-case label from the
                        # plural list element name (e.g. 'items' -> 'ITEM').
                        label = fmt['name']
                        if label[-1] == 's':
                            label = label[0:-1]
                        label = label.upper()
                self._generic_cli_formatter(prop, data, label)
        elif fmt['type'] == 'array':
            for item in data:
                line = Line(fmt, item, label)
                if isinstance(item, dict):
                    for field_name in item:
                        line.append(item[field_name])
                elif isinstance(item, basestring):
                    # NOTE(review): basestring exists only on Python 2;
                    # this branch would NameError on Python 3 -- confirm
                    # the supported interpreter before porting.
                    line.append(item)
                line.print_it()

    def cli_formatter(self, data):
        """
        This method is responsible for formatting the output for the
        command line interface.  The default behavior is to call the
        generic CLI formatter which attempts to print something
        reasonable.  If you want specific formatting, you should
        override this method and do your own thing.

        :type data: dict
        :param data: The data returned by AWS.
        """
        if data:
            self._generic_cli_formatter(self.Response, data)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.